// SPDX-License-Identifier: GPL-2.0-only
/*
 * Inode handling routines for the OSTA-UDF(tm) filesystem.
 *
 * (C) 1998 Dave Boynton
 * (C) 1998-2004 Ben Fennema
 * (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/04/98 dgb  Added rudimentary directory functions
 *  10/07/98      Fully working udf_block_map! It works!
 *  11/25/98      bmap altered to better support extents
 *  12/06/98 blf  partition support in udf_iget, udf_block_map
 *  12/12/98      rewrote udf_block_map to handle next extents and descs across
 *                block boundaries (which is not actually allowed)
 *  12/20/98      added support for strategy 4096
 *  03/07/99      rewrote udf_block_map (again)
 *                New funcs, inode_bmap, udf_next_aext
 *  04/19/99      Support for writing device EA's for major/minor #
 */
#include "udfdecl.h"
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>

#include "udf_i.h"
#include "udf_sb.h"
#define EXTENT_MERGE_SIZE 5

#define FE_MAPPED_PERMS	(FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \
			 FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \
			 FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC)

#define FE_DELETE_PERMS	(FE_PERM_U_DELETE | FE_PERM_G_DELETE | \
			 FE_PERM_O_DELETE)
static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static int inode_getblk(struct inode *inode, struct udf_map_rq *map);
static int udf_insert_aext(struct inode *, struct extent_position,
			   struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
			      struct kernel_long_ad *, int *);
static void udf_prealloc_extents(struct inode *, int, int,
				 struct kernel_long_ad *, int *);
static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
static int udf_update_extents(struct inode *, struct kernel_long_ad *, int,
			      int, struct extent_position *);
static int udf_get_block_wb(struct inode *inode, sector_t block,
			    struct buffer_head *bh_result, int create);
static void __udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->cached_extent.lstart != -1) {
		brelse(iinfo->cached_extent.epos.bh);
		iinfo->cached_extent.lstart = -1;
	}
}
/* Invalidate extent cache */
static void udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	__udf_clear_extent_cache(inode);
	spin_unlock(&iinfo->i_extent_cache_lock);
}
/* Return contents of extent cache */
static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
				 loff_t *lbcount, struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int ret = 0;

	spin_lock(&iinfo->i_extent_cache_lock);
	if ((iinfo->cached_extent.lstart <= bcount) &&
	    (iinfo->cached_extent.lstart != -1)) {
		/* Cache hit */
		*lbcount = iinfo->cached_extent.lstart;
		memcpy(pos, &iinfo->cached_extent.epos,
		       sizeof(struct extent_position));
		if (pos->bh)
			get_bh(pos->bh);
		ret = 1;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);
	return ret;
}
/* Add extent to extent cache */
static void udf_update_extent_cache(struct inode *inode, loff_t estart,
				    struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	/* Invalidate previously cached extent */
	__udf_clear_extent_cache(inode);
	if (pos->bh)
		get_bh(pos->bh);
	memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos));
	iinfo->cached_extent.lstart = estart;
	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		iinfo->cached_extent.epos.offset -= sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		iinfo->cached_extent.epos.offset -= sizeof(struct long_ad);
		break;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);
}
void udf_evict_inode(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int want_delete = 0;

	if (!is_bad_inode(inode)) {
		if (!inode->i_nlink) {
			want_delete = 1;
			udf_setsize(inode, 0);
			udf_update_inode(inode, IS_SYNC(inode));
		}
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
		    inode->i_size != iinfo->i_lenExtents) {
			udf_warn(inode->i_sb,
				 "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
				 inode->i_ino, inode->i_mode,
				 (unsigned long long)inode->i_size,
				 (unsigned long long)iinfo->i_lenExtents);
		}
	}
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	kfree(iinfo->i_data);
	iinfo->i_data = NULL;
	udf_clear_extent_cache(inode);
	if (want_delete)
		udf_free_inode(inode);
}
static void udf_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);
	loff_t isize = inode->i_size;

	if (to > isize) {
		truncate_pagecache(inode, isize);
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			udf_truncate_extents(inode);
			up_write(&iinfo->i_data_sem);
		}
	}
}
static int udf_adinicb_writepage(struct folio *folio,
				 struct writeback_control *wbc, void *data)
{
	struct inode *inode = folio->mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio->index != 0);
	memcpy_from_file_folio(iinfo->i_data + iinfo->i_lenEAttr, folio, 0,
			       i_size_read(inode));
	folio_unlock(folio);
	mark_inode_dirty(inode);

	return 0;
}
static int udf_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
		return mpage_writepages(mapping, wbc, udf_get_block_wb);
	return write_cache_pages(mapping, wbc, udf_adinicb_writepage, NULL);
}
static void udf_adinicb_read_folio(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);
	loff_t isize = i_size_read(inode);

	folio_fill_tail(folio, 0, iinfo->i_data + iinfo->i_lenEAttr, isize);
	folio_mark_uptodate(folio);
}
static int udf_read_folio(struct file *file, struct folio *folio)
{
	struct udf_inode_info *iinfo = UDF_I(file_inode(file));

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		udf_adinicb_read_folio(folio);
		folio_unlock(folio);
		return 0;
	}
	return mpage_read_folio(folio, udf_get_block);
}
static void udf_readahead(struct readahead_control *rac)
{
	struct udf_inode_info *iinfo = UDF_I(rac->mapping->host);

	/*
	 * No readahead needed for in-ICB files and udf_get_block() would get
	 * confused for such file anyway.
	 */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		return;

	mpage_readahead(rac, udf_get_block);
}
static int udf_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct folio **foliop, void **fsdata)
{
	struct udf_inode_info *iinfo = UDF_I(file_inode(file));
	struct folio *folio;
	int ret;

	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
		ret = block_write_begin(mapping, pos, len, foliop,
					udf_get_block);
		if (unlikely(ret))
			udf_write_failed(mapping, pos + len);
		return ret;
	}
	if (WARN_ON_ONCE(pos >= PAGE_SIZE))
		return -EIO;
	folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;
	if (!folio_test_uptodate(folio))
		udf_adinicb_read_folio(folio);
	return 0;
}
static int udf_write_end(struct file *file, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct folio *folio, void *fsdata)
{
	struct inode *inode = file_inode(file);
	loff_t last_pos;

	if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
		return generic_write_end(file, mapping, pos, len, copied, folio,
					 fsdata);
	last_pos = pos + copied;
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);
	folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}
static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	/* Fallback to buffered IO for in-ICB files */
	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		return 0;
	ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
	if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
		udf_write_failed(mapping, iocb->ki_pos + count);
	return ret;
}
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	struct udf_inode_info *iinfo = UDF_I(mapping->host);

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		return -EINVAL;
	return generic_block_bmap(mapping, block, udf_get_block);
}
const struct address_space_operations udf_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= udf_read_folio,
	.readahead	= udf_readahead,
	.writepages	= udf_writepages,
	.write_begin	= udf_write_begin,
	.write_end	= udf_write_end,
	.direct_IO	= udf_direct_IO,
	.bmap		= udf_bmap,
	.migrate_folio	= buffer_migrate_folio,
};
/*
 * Expand file stored in ICB to a normal one-block-file
 *
 * This function requires i_mutex held
 */
int udf_expand_file_adinicb(struct inode *inode)
{
	struct folio *folio;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int err;

	WARN_ON_ONCE(!inode_is_locked(inode));
	if (!iinfo->i_lenAlloc) {
		down_write(&iinfo->i_data_sem);
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
		else
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
		up_write(&iinfo->i_data_sem);
		mark_inode_dirty(inode);
		return 0;
	}

	folio = __filemap_get_folio(inode->i_mapping, 0,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_KERNEL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!folio_test_uptodate(folio))
		udf_adinicb_read_folio(folio);
	down_write(&iinfo->i_data_sem);
	memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	folio_mark_dirty(folio);
	folio_unlock(folio);
	up_write(&iinfo->i_data_sem);
	err = filemap_fdatawrite(inode->i_mapping);
	if (err) {
		/* Restore everything back so that we don't lose data... */
		folio_lock(folio);
		down_write(&iinfo->i_data_sem);
		memcpy_from_folio(iinfo->i_data + iinfo->i_lenEAttr,
				  folio, 0, inode->i_size);
		folio_unlock(folio);
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		iinfo->i_lenAlloc = inode->i_size;
		up_write(&iinfo->i_data_sem);
	}
	folio_put(folio);
	mark_inode_dirty(inode);

	return err;
}
#define UDF_MAP_CREATE		0x01	/* Mapping can allocate new blocks */
#define UDF_MAP_NOPREALLOC	0x02	/* Do not preallocate blocks */

#define UDF_BLK_MAPPED	0x01	/* Block was successfully mapped */
#define UDF_BLK_NEW	0x02	/* Block was freshly allocated */

struct udf_map_rq {
	sector_t lblk;
	udf_pblk_t pblk;
	int iflags;		/* UDF_MAP_ flags determining behavior */
	int oflags;		/* UDF_BLK_ flags reporting results */
};
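
/*
 * Map the file block 'map->lblk' to a block on disk. When UDF_MAP_CREATE is
 * set in map->iflags, allocate a new block if needed; the result is reported
 * through map->pblk and map->oflags.
 */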
static int udf_map_block(struct inode *inode, struct udf_map_rq *map)
{
	int err;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (WARN_ON_ONCE(iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB))
		return -EFSCORRUPTED;

	map->oflags = 0;
	if (!(map->iflags & UDF_MAP_CREATE)) {
		struct kernel_lb_addr eloc;
		uint32_t elen;
		sector_t offset;
		struct extent_position epos = {};

		down_read(&iinfo->i_data_sem);
		if (inode_bmap(inode, map->lblk, &epos, &eloc, &elen, &offset)
				== (EXT_RECORDED_ALLOCATED >> 30)) {
			map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc,
						      offset);
			map->oflags |= UDF_BLK_MAPPED;
		}
		up_read(&iinfo->i_data_sem);
		brelse(epos.bh);

		return 0;
	}

	down_write(&iinfo->i_data_sem);
	/*
	 * Block beyond EOF and prealloc extents? Just discard preallocation
	 * as it is not useful and complicates things.
	 */
	if (((loff_t)map->lblk) << inode->i_blkbits >= iinfo->i_lenExtents)
		udf_discard_prealloc(inode);
	udf_clear_extent_cache(inode);
	err = inode_getblk(inode, map);
	up_write(&iinfo->i_data_sem);
	return err;
}
static int __udf_get_block(struct inode *inode, sector_t block,
			   struct buffer_head *bh_result, int flags)
{
	int err;
	struct udf_map_rq map = {
		.lblk = block,
		.iflags = flags,
	};

	err = udf_map_block(inode, &map);
	if (err < 0)
		return err;

	if (map.oflags & UDF_BLK_MAPPED) {
		map_bh(bh_result, inode->i_sb, map.pblk);
		if (map.oflags & UDF_BLK_NEW)
			set_buffer_new(bh_result);
	}
	return 0;
}
int udf_get_block(struct inode *inode, sector_t block,
		  struct buffer_head *bh_result, int create)
{
	int flags = create ? UDF_MAP_CREATE : 0;

	/*
	 * We preallocate blocks only for regular files. It also makes sense
	 * for directories but there's a problem when to drop the
	 * preallocation. We might use some delayed work for that but I feel
	 * it's overengineering for a filesystem like UDF.
	 */
	if (!S_ISREG(inode->i_mode))
		flags |= UDF_MAP_NOPREALLOC;
	return __udf_get_block(inode, block, bh_result, flags);
}
/*
 * We shouldn't be allocating blocks on page writeback since we allocate them
 * on page fault. We can spot dirty buffers without allocated blocks though
 * when truncate expands file. These however don't have valid data so we can
 * safely ignore them. So never allocate blocks from page writeback.
 */
static int udf_get_block_wb(struct inode *inode, sector_t block,
			    struct buffer_head *bh_result, int create)
{
	return __udf_get_block(inode, block, bh_result, 0);
}
/* Extend the file with new blocks totaling 'new_block_bytes',
 * return the number of extents added
 */
static int udf_do_extend_file(struct inode *inode,
			      struct extent_position *last_pos,
			      struct kernel_long_ad *last_ext,
			      loff_t new_block_bytes)
{
	uint32_t add;
	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
	struct super_block *sb = inode->i_sb;
	struct udf_inode_info *iinfo;
	int err;

	/* The previous extent is fake and we should not extend by anything
	 * - there's nothing to do... */
	if (!new_block_bytes && fake)
		return 0;

	iinfo = UDF_I(inode);
	/* Round the last extent up to a multiple of block size */
	if (last_ext->extLength & (sb->s_blocksize - 1)) {
		last_ext->extLength =
			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
			  sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
		iinfo->i_lenExtents =
			(iinfo->i_lenExtents + sb->s_blocksize - 1) &
			~(sb->s_blocksize - 1);
	}

	add = 0;
	/* Can we merge with the previous extent? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
					EXT_NOT_RECORDED_NOT_ALLOCATED) {
		add = (1 << 30) - sb->s_blocksize -
			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
		if (add > new_block_bytes)
			add = new_block_bytes;
		new_block_bytes -= add;
		last_ext->extLength += add;
	}

	if (fake) {
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err < 0)
			goto out_err;
		count++;
	} else {
		struct kernel_lb_addr tmploc;
		uint32_t tmplen;

		udf_write_aext(inode, last_pos, &last_ext->extLocation,
			       last_ext->extLength, 1);

		/*
		 * We've rewritten the last extent. If we are going to add
		 * more extents, we may need to enter possible following
		 * empty indirect extent.
		 */
		if (new_block_bytes)
			udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
	}
	iinfo->i_lenExtents += add;

	/* Managed to do everything necessary? */
	if (!new_block_bytes)
		goto out;

	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
	last_ext->extLocation.logicalBlockNum = 0;
	last_ext->extLocation.partitionReferenceNum = 0;
	add = (1 << 30) - sb->s_blocksize;
	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;

	/* Create enough extents to cover the whole hole */
	while (new_block_bytes > add) {
		new_block_bytes -= add;
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err)
			goto out_err;
		count++;
		iinfo->i_lenExtents += add;
	}
	if (new_block_bytes) {
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			new_block_bytes;
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err)
			goto out_err;
		count++;
		iinfo->i_lenExtents += new_block_bytes;
	}

out:
	/* last_pos should point to the last written extent... */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		last_pos->offset -= sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		last_pos->offset -= sizeof(struct long_ad);
	else
		return -EIO;

	return count;
out_err:
	/* Remove extents we've created so far */
	udf_clear_extent_cache(inode);
	udf_truncate_extents(inode);
	return err;
}
/* Extend the final block of the file to final_block_len bytes */
static void udf_do_extend_final_block(struct inode *inode,
				      struct extent_position *last_pos,
				      struct kernel_long_ad *last_ext,
				      uint32_t new_elen)
{
	uint32_t added_bytes;

	/*
	 * Extent already large enough? It may be already rounded up to block
	 * size...
	 */
	if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
		return;
	added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
	last_ext->extLength += added_bytes;
	UDF_I(inode)->i_lenExtents += added_bytes;

	udf_write_aext(inode, last_pos, &last_ext->extLocation,
		       last_ext->extLength, 1);
}
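
/*
 * Extend the file up to 'newsize' bytes by growing the last extent and/or
 * appending unallocated extents that cover the hole.
 */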
static int udf_extend_file(struct inode *inode, loff_t newsize)
{
	struct extent_position epos;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	int8_t etype;
	struct super_block *sb = inode->i_sb;
	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
	loff_t new_elen;
	int adsize;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct kernel_long_ad extent;
	int err = 0;
	bool within_last_ext;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		BUG();

	down_write(&iinfo->i_data_sem);
	/*
	 * When creating hole in file, just don't bother with preserving
	 * preallocation. It likely won't be very useful anyway.
	 */
	udf_discard_prealloc(inode);

	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
	within_last_ext = (etype != -1);
	/* We don't expect extents past EOF... */
	WARN_ON_ONCE(within_last_ext &&
		     elen > ((loff_t)offset + 1) << inode->i_blkbits);

	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
		/* File has no extents at all or has empty last
		 * indirect extent! Create a fake extent... */
		extent.extLocation.logicalBlockNum = 0;
		extent.extLocation.partitionReferenceNum = 0;
		extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
	} else {
		epos.offset -= adsize;
		etype = udf_next_aext(inode, &epos, &extent.extLocation,
				      &extent.extLength, 0);
		extent.extLength |= etype << 30;
	}

	new_elen = ((loff_t)offset << inode->i_blkbits) |
					(newsize & (sb->s_blocksize - 1));

	/* File has extent covering the new size (could happen when extending
	 * inside a block)?
	 */
	if (within_last_ext) {
		/* Extending file within the last file block */
		udf_do_extend_final_block(inode, &epos, &extent, new_elen);
	} else {
		err = udf_do_extend_file(inode, &epos, &extent, new_elen);
	}

	if (err < 0)
		goto out;
	err = 0;
out:
	brelse(epos.bh);
	up_write(&iinfo->i_data_sem);
	return err;
}
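
/*
 * Look up and, if necessary, allocate the block backing map->lblk. Called
 * from udf_map_block() with i_data_sem held for writing.
 */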
static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
{
	struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	struct extent_position prev_epos, cur_epos, next_epos;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0, tmpelen;
	struct kernel_lb_addr eloc, tmpeloc;
	int c = 1;
	loff_t lbcount = 0, b_off = 0;
	udf_pblk_t newblocknum;
	sector_t offset = 0;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(inode);
	udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
	int lastblock = 0;
	bool isBeyondEOF;
	int ret = 0;

	prev_epos.offset = udf_file_entry_alloc_offset(inode);
	prev_epos.block = iinfo->i_location;
	prev_epos.bh = NULL;
	cur_epos = next_epos = prev_epos;
	b_off = (loff_t)map->lblk << inode->i_sb->s_blocksize_bits;

	/* find the extent which contains the block we are looking for.
	   alternate between laarr[0] and laarr[1] for locations of the
	   current extent, and the previous extent */
	do {
		if (prev_epos.bh != cur_epos.bh) {
			brelse(prev_epos.bh);
			get_bh(cur_epos.bh);
			prev_epos.bh = cur_epos.bh;
		}
		if (cur_epos.bh != next_epos.bh) {
			brelse(cur_epos.bh);
			get_bh(next_epos.bh);
			cur_epos.bh = next_epos.bh;
		}

		lbcount += elen;

		prev_epos.block = cur_epos.block;
		cur_epos.block = next_epos.block;

		prev_epos.offset = cur_epos.offset;
		cur_epos.offset = next_epos.offset;

		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
		if (etype == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);

		count++;
	} while (lbcount + elen <= b_off);

	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;
	/*
	 * Move prev_epos and cur_epos into indirect extent if we are at
	 * the pointer to it
	 */
	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);

	/* if the extent is allocated and recorded, return the block
	   if the extent is not a multiple of the blocksize, round up */

	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
		if (elen & (inode->i_sb->s_blocksize - 1)) {
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				 ~(inode->i_sb->s_blocksize - 1));
			iinfo->i_lenExtents =
				ALIGN(iinfo->i_lenExtents,
				      inode->i_sb->s_blocksize);
			udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
		}
		map->oflags = UDF_BLK_MAPPED;
		map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
		ret = 0;
		goto out_free;
	}

	/* Are we beyond EOF and preallocated extent? */
	if (etype == -1) {
		loff_t hole_len;

		isBeyondEOF = true;
		if (count) {
			if (c)
				lastblock = 1;
		} else {
			lastblock = 1;
			/* Create a fake extent when there's not one */
			memset(&laarr[0].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
			/* Will udf_do_extend_file() create real extent from
			   a fake one? */
			startnum = (offset > 0);
		}
		/* Create extents for the hole between EOF and offset */
		hole_len = (loff_t)offset << inode->i_blkbits;
		ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
		if (ret < 0)
			goto out_free;
		c = 0;
		offset = 0;
		count += ret;
		/*
		 * Is there any real extent? - otherwise we overwrite the fake
		 * one...
		 */
		if (count)
			c = !c;
		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			inode->i_sb->s_blocksize;
		memset(&laarr[c].extLocation, 0x00,
		       sizeof(struct kernel_lb_addr));
		count++;
	} else {
		isBeyondEOF = false;
		endnum = startnum = ((count > 2) ? 2 : count);

		/* if the current extent is in position 0,
		   swap it with the previous */
		if (!c && count != 1) {
			laarr[2] = laarr[0];
			laarr[0] = laarr[1];
			laarr[1] = laarr[2];
		}

		/* if the current block is located in an extent,
		   read the next extent */
		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
		if (etype != -1) {
			laarr[c + 1].extLength = (etype << 30) | elen;
			laarr[c + 1].extLocation = eloc;
			count++;
			startnum++;
			endnum++;
		} else
			lastblock = 1;
	}

	/* if the current extent is not recorded but allocated, get the
	 * block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else { /* otherwise, allocate a new block */
		if (iinfo->i_next_alloc_block == map->lblk)
			goal = iinfo->i_next_alloc_goal;

		if (!goal) {
			if (!(goal = pgoal)) /* XXX: what was intended here? */
				goal = iinfo->i_location.logicalBlockNum + 1;
		}

		newblocknum = udf_new_block(inode->i_sb, inode,
				iinfo->i_location.partitionReferenceNum,
				goal, &ret);
		if (!newblocknum)
			goto out_free;
		if (isBeyondEOF)
			iinfo->i_lenExtents += inode->i_sb->s_blocksize;
	}

	/* if the extent the requsted block is located in contains multiple
	 * blocks, split the extent into at most three extents. blocks prior
	 * to requested block, requested block, and blocks after requested
	 * block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

	if (!(map->iflags & UDF_MAP_NOPREALLOC))
		udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	 * of extents is greater than the old number, and deleting extents if
	 * the new number of extents is less than the old number */
	ret = udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
	if (ret < 0)
		goto out_free;

	map->pblk = udf_get_pblock(inode->i_sb, newblocknum,
				iinfo->i_location.partitionReferenceNum, 0);
	map->oflags = UDF_BLK_NEW | UDF_BLK_MAPPED;
	iinfo->i_next_alloc_block = map->lblk + 1;
	iinfo->i_next_alloc_goal = newblocknum + 1;
	inode_set_ctime_current(inode);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	ret = 0;
out_free:
	brelse(prev_epos.bh);
	brelse(cur_epos.bh);
	brelse(next_epos.bh);
	return ret;
}
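
/*
 * Split the extent containing the requested block into extents for the blocks
 * before it, the newly mapped block itself, and the blocks after it, updating
 * laarr[] and the extent count in *endnum.
 */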
static void udf_split_extents(struct inode *inode, int *c, int offset,
			      udf_pblk_t newblocknum,
			      struct kernel_long_ad *laarr, int *endnum)
{
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
	    (laarr[*c].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
		int curr = *c;
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			    blocksize - 1) >> blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		if (blen == 1)
			;
		else if (!offset || blen == offset + 1) {
			laarr[curr + 2] = laarr[curr + 1];
			laarr[curr + 1] = laarr[curr];
		} else {
			laarr[curr + 3] = laarr[curr + 1];
			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
		}

		if (offset) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
				udf_free_blocks(inode->i_sb, inode,
						&laarr[curr].extLocation,
						0, offset);
				laarr[curr].extLength =
					EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.
						partitionReferenceNum = 0;
			} else
				laarr[curr].extLength = (etype << 30) |
					(offset << blocksize_bits);
			curr++;
			(*c)++;
			(*endnum)++;
		}

		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I(inode)->i_location.partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			blocksize;
		curr++;

		if (blen != offset + 1) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum +=
								offset + 1;
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << blocksize_bits);
			curr++;
			(*endnum)++;
		}
	}
}
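
/*
 * Preallocate blocks following extent 'c' in laarr[] (up to
 * UDF_DEFAULT_PREALLOC_BLOCKS), turning following unallocated extents into
 * allocated ones where possible.
 */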
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
				 struct kernel_long_ad *laarr,
				 int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c + 1)) {
		if (!lastblock)
			return;
		else
			start = c;
	} else {
		if ((laarr[c + 1].extLength >> 30) ==
					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			start = c + 1;
			length = currlength =
				(((laarr[c + 1].extLength &
					UDF_EXTENT_LENGTH_MASK) +
				  inode->i_sb->s_blocksize - 1) >>
				  inode->i_sb->s_blocksize_bits);
		} else
			start = c;
	}

	for (i = start + 1; i <= *endnum; i++) {
		if (i == *endnum) {
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		} else if ((laarr[i].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
			length += (((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
				    inode->i_sb->s_blocksize - 1) >>
				    inode->i_sb->s_blocksize_bits);
		} else
			break;
	}

	if (length) {
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			  inode->i_sb->s_blocksize - 1) >>
			  inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
				laarr[start].extLocation.partitionReferenceNum,
				next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
				length : UDF_DEFAULT_PREALLOC_BLOCKS) -
				currlength);

		if (numalloc) {
			if (start == (c + 1))
				laarr[start].extLength +=
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
			else {
				memmove(&laarr[c + 2], &laarr[c + 1],
					sizeof(struct long_ad) *
					(*endnum - (c + 1)));
				(*endnum)++;
				laarr[c + 1].extLocation.logicalBlockNum = next;
				laarr[c + 1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.
							partitionReferenceNum;
				laarr[c + 1].extLength =
					EXT_NOT_RECORDED_ALLOCATED |
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
				start = c + 1;
			}

			for (i = start + 1; numalloc && i < *endnum; i++) {
				int elen = ((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
					    inode->i_sb->s_blocksize - 1) >>
					    inode->i_sb->s_blocksize_bits;

				if (elen > numalloc) {
					laarr[i].extLength -=
						(numalloc <<
						 inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				} else {
					numalloc -= elen;
					if (*endnum > (i + 1))
						memmove(&laarr[i],
							&laarr[i + 1],
							sizeof(struct long_ad) *
							(*endnum - (i + 1)));
					i--;
					(*endnum)--;
				}
			}
			UDF_I(inode)->i_lenExtents +=
				numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}
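
/*
 * Merge neighbouring extents in laarr[] whose types and block ranges allow
 * it, freeing blocks that become unneeded and reducing *endnum accordingly.
 */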
static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
			      int *endnum)
{
	int i;
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	for (i = 0; i < (*endnum - 1); i++) {
		struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
		struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];

		if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
			(((li->extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
			((lip1->extLocation.logicalBlockNum -
			  li->extLocation.logicalBlockNum) ==
			(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			blocksize - 1) >> blocksize_bits)))) {

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) {
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					  blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if (((li->extLength >> 30) ==
				(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			   ((lip1->extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
			udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
					((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
				lip1->extLength = (lip1->extLength -
						   (li->extLength &
						    UDF_EXTENT_LENGTH_MASK) +
						   UDF_EXTENT_LENGTH_MASK) &
						   ~(blocksize - 1);
				li->extLength = (li->extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						blocksize;
			} else {
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					  blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if ((li->extLength >> 30) ==
					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			udf_free_blocks(inode->i_sb, inode,
					&li->extLocation, 0,
					((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;
			li->extLength = (li->extLength &
						UDF_EXTENT_LENGTH_MASK) |
						EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}
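
/*
 * Write the extents in laarr[] back to the inode / allocation extent,
 * inserting or deleting allocation descriptors when the number of extents
 * has changed.
 */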
static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
			      int startnum, int endnum,
			      struct extent_position *epos)
{
	int start = 0, i;
	struct kernel_lb_addr tmploc;
	uint32_t tmplen;
	int err;

	if (startnum > endnum) {
		for (i = 0; i < (startnum - endnum); i++)
			udf_delete_aext(inode, *epos);
	} else if (startnum < endnum) {
		for (i = 0; i < (endnum - startnum); i++) {
			err = udf_insert_aext(inode, *epos,
					      laarr[i].extLocation,
					      laarr[i].extLength);
			/*
			 * If we fail here, we are likely corrupting the extent
			 * list and leaking blocks. At least stop early to
			 * limit the damage.
			 */
			if (err < 0)
				return err;
			udf_next_aext(inode, epos, &laarr[i].extLocation,
				      &laarr[i].extLength, 1);
			start++;
		}
	}

	for (i = start; i < endnum; i++) {
		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
		udf_write_aext(inode, epos, &laarr[i].extLocation,
			       laarr[i].extLength, 1);
	}
	return 0;
}
struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;
	struct udf_map_rq map = {
		.lblk = block,
		.iflags = UDF_MAP_NOPREALLOC | (create ? UDF_MAP_CREATE : 0),
	};

	*err = udf_map_block(inode, &map);
	if (*err || !(map.oflags & UDF_BLK_MAPPED))
		return NULL;

	bh = sb_getblk(inode->i_sb, map.pblk);
	if (!bh) {
		*err = -ENOMEM;
		return NULL;
	}
	if (map.oflags & UDF_BLK_NEW) {
		lock_buffer(bh);
		memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		return bh;
	}

	if (bh_read(bh, 0) >= 0)
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}
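
/*
 * Change the file size to 'newsize', expanding in-ICB data or allocating /
 * truncating extents as needed.
 */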
int udf_setsize(struct inode *inode, loff_t newsize)
{
	int err = 0;
	struct udf_inode_info *iinfo;
	unsigned int bsize = i_blocksize(inode);

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;

	iinfo = UDF_I(inode);
	if (newsize > inode->i_size) {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			if (bsize >=
			    (udf_file_entry_alloc_offset(inode) + newsize)) {
				down_write(&iinfo->i_data_sem);
				iinfo->i_lenAlloc = newsize;
				up_write(&iinfo->i_data_sem);
				goto set_size;
			}
			err = udf_expand_file_adinicb(inode);
			if (err)
				return err;
		}
		err = udf_extend_file(inode, newsize);
		if (err)
			return err;
set_size:
		truncate_setsize(inode, newsize);
	} else {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			memset(iinfo->i_data + iinfo->i_lenEAttr + newsize,
			       0x00, bsize - newsize -
			       udf_file_entry_alloc_offset(inode));
			iinfo->i_lenAlloc = newsize;
			truncate_setsize(inode, newsize);
			up_write(&iinfo->i_data_sem);
			goto update_time;
		}
		err = block_truncate_page(inode->i_mapping, newsize,
					  udf_get_block);
		if (err)
			return err;
		truncate_setsize(inode, newsize);
		down_write(&iinfo->i_data_sem);
		udf_clear_extent_cache(inode);
		err = udf_truncate_extents(inode);
		up_write(&iinfo->i_data_sem);
		if (err)
			return err;
	}
update_time:
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return err;
}
/*
 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
 * arbitrary - just that we hopefully don't limit any real use of rewritten
 * inode on write-once media but avoid looping for too long on corrupted media.
 */
#define UDF_MAX_ICB_NESTING 1024
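
/*
 * Read the file (or extended file / unallocated space) entry of the inode
 * from disk and set up the in-core inode from it.
 */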
static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint16_t ident;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	struct kernel_lb_addr *iloc = &iinfo->i_location;
	unsigned int link_count;
	unsigned int indirections = 0;
	int bs = inode->i_sb->s_blocksize;
	int ret = -EIO;
	uint32_t uid, gid;
	struct timespec64 ts;

reread:
	if (iloc->partitionReferenceNum >= sbi->s_partitions) {
		udf_debug("partition reference: %u > logical volume partitions: %u\n",
			  iloc->partitionReferenceNum, sbi->s_partitions);
		return -EIO;
	}

	if (iloc->logicalBlockNum >=
	    sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
		udf_debug("block=%u, partition=%u out of range\n",
			  iloc->logicalBlockNum, iloc->partitionReferenceNum);
		return -EIO;
	}

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_ino = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
	if (!bh) {
		udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
		return -EIO;
	}

	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
	    ident != TAG_IDENT_USE) {
		udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
			inode->i_ino, ident);
		goto out;
	}

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
		struct buffer_head *ibh;

		ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
		if (ident == TAG_IDENT_IE && ibh) {
			struct kernel_lb_addr loc;
			struct indirectEntry *ie;

			ie = (struct indirectEntry *)ibh->b_data;
			loc = lelb_to_cpu(ie->indirectICB.extLocation);

			if (ie->indirectICB.extLength) {
				brelse(ibh);
				memcpy(&iinfo->i_location, &loc,
				       sizeof(struct kernel_lb_addr));
				if (++indirections > UDF_MAX_ICB_NESTING) {
					udf_err(inode->i_sb,
						"too many ICBs in ICB hierarchy"
						" (max %d supported)\n",
						UDF_MAX_ICB_NESTING);
					goto out;
				}
				brelse(bh);
				goto reread;
			}
		}
		brelse(ibh);
	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
		udf_err(inode->i_sb, "unsupported strategy type: %u\n",
			le16_to_cpu(fe->icbTag.strategyType));
		goto out;
	}
	if (fe->icbTag.strategyType == cpu_to_le16(4))
		iinfo->i_strat4096 = 0;
	else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
		iinfo->i_strat4096 = 1;

	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
							ICBTAG_FLAG_AD_MASK;
	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
		ret = -EIO;
		goto out;
	}
	iinfo->i_hidden = hidden_inode;
	iinfo->i_unique = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenExtents = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_next_alloc_block = 0;
	iinfo->i_next_alloc_goal = 0;
	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
		iinfo->i_efe = 1;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, bs -
					sizeof(struct extendedFileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_data,
		       bh->b_data + sizeof(struct extendedFileEntry),
		       bs - sizeof(struct extendedFileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_data,
		       bh->b_data + sizeof(struct fileEntry),
		       bs - sizeof(struct fileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 1;
		iinfo->i_lenAlloc = le32_to_cpu(
				((struct unallocSpaceEntry *)bh->b_data)->
				 lengthAllocDescs);
		ret = udf_alloc_i_data(inode, bs -
					sizeof(struct unallocSpaceEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_data,
		       bh->b_data + sizeof(struct unallocSpaceEntry),
		       bs - sizeof(struct unallocSpaceEntry));
		return 0;
	}

	ret = -EIO;
	read_lock(&sbi->s_cred_lock);
	uid = le32_to_cpu(fe->uid);
	if (uid == UDF_INVALID_ID ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
		inode->i_uid = sbi->s_uid;
	else
		i_uid_write(inode, uid);

	gid = le32_to_cpu(fe->gid);
	if (gid == UDF_INVALID_ID ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
		inode->i_gid = sbi->s_gid;
	else
		i_gid_write(inode, gid);

	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
	    sbi->s_fmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_fmode;
	else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
		 sbi->s_dmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_dmode;
	else
		inode->i_mode = udf_convert_permissions(fe);
	inode->i_mode &= ~sbi->s_umask;
	iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;

	read_unlock(&sbi->s_cred_lock);

	link_count = le16_to_cpu(fe->fileLinkCount);
	if (!link_count) {
		if (!hidden_inode) {
			ret = -ESTALE;
			goto out;
		}
		link_count = 1;
	}
	set_nlink(inode, link_count);

	inode->i_size = le64_to_cpu(fe->informationLength);
	iinfo->i_lenExtents = inode->i_size;

	if (iinfo->i_efe == 0) {
		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		udf_disk_stamp_to_time(&ts, fe->accessTime);
		inode_set_atime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&ts, fe->modificationTime);
		inode_set_mtime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&ts, fe->attrTime);
		inode_set_ctime_to_ts(inode, ts);

		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
		iinfo->i_streamdir = 0;
		iinfo->i_lenStreams = 0;
	} else {
		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		udf_disk_stamp_to_time(&ts, efe->accessTime);
		inode_set_atime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&ts, efe->modificationTime);
		inode_set_mtime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&ts, efe->attrTime);
		inode_set_ctime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);

		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);

		iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
		iinfo->i_locStreamdir =
			lelb_to_cpu(efe->streamDirectoryICB.extLocation);
		iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
		if (iinfo->i_lenStreams >= inode->i_size)
			iinfo->i_lenStreams -= inode->i_size;
		else
			iinfo->i_lenStreams = 0;
	}
	inode->i_generation = iinfo->i_unique;

	/*
	 * Sanity check length of allocation descriptors and extended attrs to
	 * avoid integer overflows
	 */
	if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
		goto out;
	/* Now do exact checks */
	if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
		goto out;
	/* Sanity checks for files in ICB so that we don't get confused later */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		/*
		 * For file in ICB data is stored in allocation descriptor
		 * so sizes should match
		 */
		if (iinfo->i_lenAlloc != inode->i_size)
			goto out;
		/* File in ICB has to fit in there... */
		if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
			goto out;
	}

	switch (fe->icbTag.fileType) {
	case ICBTAG_FILE_TYPE_DIRECTORY:
		inode->i_op = &udf_dir_inode_operations;
		inode->i_fop = &udf_dir_operations;
		inode->i_mode |= S_IFDIR;
		inc_nlink(inode);
		break;
	case ICBTAG_FILE_TYPE_REALTIME:
	case ICBTAG_FILE_TYPE_REGULAR:
	case ICBTAG_FILE_TYPE_UNDEF:
	case ICBTAG_FILE_TYPE_VAT20:
		inode->i_data.a_ops = &udf_aops;
		inode->i_op = &udf_file_inode_operations;
		inode->i_fop = &udf_file_operations;
		inode->i_mode |= S_IFREG;
		break;
	case ICBTAG_FILE_TYPE_BLOCK:
		inode->i_mode |= S_IFBLK;
		break;
	case ICBTAG_FILE_TYPE_CHAR:
		inode->i_mode |= S_IFCHR;
		break;
	case ICBTAG_FILE_TYPE_FIFO:
		init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
		break;
	case ICBTAG_FILE_TYPE_SOCKET:
		init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
		break;
	case ICBTAG_FILE_TYPE_SYMLINK:
		inode->i_data.a_ops = &udf_symlink_aops;
		inode->i_op = &udf_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mode = S_IFLNK | 0777;
		break;
	case ICBTAG_FILE_TYPE_MAIN:
		udf_debug("METADATA FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_MIRROR:
		udf_debug("METADATA MIRROR FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_BITMAP:
		udf_debug("METADATA BITMAP FILE-----\n");
		break;
	default:
		udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
			inode->i_ino, fe->icbTag.fileType);
		goto out;
	}
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (dsea) {
			init_special_inode(inode, inode->i_mode,
				MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
				      le32_to_cpu(dsea->minorDeviceIdent)));
			/* Developer ID ??? */
		} else
			goto out;
	}
	ret = 0;
out:
	brelse(bh);
	return ret;
}
static int udf_alloc_i_data(struct inode *inode, size_t size)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	iinfo->i_data = kmalloc(size, GFP_KERNEL);
	if (!iinfo->i_data)
		return -ENOMEM;
	return 0;
}
static umode_t udf_convert_permissions(struct fileEntry *fe)
{
	umode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode =	((permissions) & 0007) |
		((permissions >> 2) & 0070) |
		((permissions >> 4) & 0700) |
		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}
void udf_update_extra_perms(struct inode *inode, umode_t mode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	/*
	 * UDF 2.01 sec. 3.3.3.3 Note 2:
	 * In Unix, delete permission tracks write
	 */
	iinfo->i_extraPerms &= ~FE_DELETE_PERMS;
	if (mode & 0200)
		iinfo->i_extraPerms |= FE_PERM_U_DELETE;
	if (mode & 0020)
		iinfo->i_extraPerms |= FE_PERM_G_DELETE;
	if (mode & 0002)
		iinfo->i_extraPerms |= FE_PERM_O_DELETE;
}
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}
static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
{
	if (iinfo->i_crtime.tv_sec > time.tv_sec ||
	    (iinfo->i_crtime.tv_sec == time.tv_sec &&
	     iinfo->i_crtime.tv_nsec > time.tv_nsec))
		iinfo->i_crtime = time;
}
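
/*
 * Write the in-core inode back into its on-disk (extended) file entry,
 * syncing the buffer when 'do_sync' is set.
 */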
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint64_t lb_recorded;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int err = 0;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	struct udf_inode_info *iinfo = UDF_I(inode);

	bh = sb_getblk(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
	if (!bh) {
		udf_debug("getblk failure\n");
		return -EIO;
	}

	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (iinfo->i_use) {
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       iinfo->i_data, inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
		crclen = sizeof(struct unallocSpaceEntry);

		goto finish;
	}

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->uid = cpu_to_le32(i_uid_read(inode));

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->gid = cpu_to_le32(i_gid_read(inode));

	udfperms = ((inode->i_mode & 0007)) |
		   ((inode->i_mode & 0070) << 2) |
		   ((inode->i_mode & 0700) << 4);

	udfperms |= iinfo->i_extraPerms;
	fe->permissions = cpu_to_le32(udfperms);

	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else {
		if (iinfo->i_hidden)
			fe->fileLinkCount = cpu_to_le16(0);
		else
			fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
	}

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (!dsea) {
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
						     sizeof(struct deviceSpec) +
						     sizeof(struct regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(
						sizeof(struct deviceSpec) +
						sizeof(struct regid));
			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
		}
		eid = (struct regid *)dsea->impUse;
		memset(eid, 0, sizeof(*eid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		lb_recorded = 0; /* No extents => no blocks! */
	else
		lb_recorded =
			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
			(blocksize_bits - 9);

	if (iinfo->i_efe == 0) {
		memcpy(bh->b_data + sizeof(struct fileEntry),
		       iinfo->i_data,
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		udf_time_to_disk_stamp(&fe->accessTime, inode_get_atime(inode));
		udf_time_to_disk_stamp(&fe->modificationTime, inode_get_mtime(inode));
		udf_time_to_disk_stamp(&fe->attrTime, inode_get_ctime(inode));
		memset(&(fe->impIdent), 0, sizeof(struct regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	} else {
		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
		       iinfo->i_data,
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
		efe->objectSize =
			cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		if (iinfo->i_streamdir) {
			struct long_ad *icb_lad = &efe->streamDirectoryICB;

			icb_lad->extLocation =
				cpu_to_lelb(iinfo->i_locStreamdir);
			icb_lad->extLength =
				cpu_to_le32(inode->i_sb->s_blocksize);
		}

		udf_adjust_time(iinfo, inode_get_atime(inode));
		udf_adjust_time(iinfo, inode_get_mtime(inode));
		udf_adjust_time(iinfo, inode_get_ctime(inode));

		udf_time_to_disk_stamp(&efe->accessTime,
				       inode_get_atime(inode));
		udf_time_to_disk_stamp(&efe->modificationTime,
				       inode_get_mtime(inode));
		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
		udf_time_to_disk_stamp(&efe->attrTime, inode_get_ctime(inode));

		memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}

finish:
	if (iinfo->i_strat4096) {
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	} else {
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	if (iinfo->i_use)
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
	else if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	icbflags =	iinfo->i_alloc_type |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				  ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	if (sbi->s_udfrev >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
	fe->descTag.tagLocation = cpu_to_le32(
					iinfo->i_location.logicalBlockNum);
	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
						    crclen));
	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_write_io_error(bh)) {
			udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
				 inode->i_ino);
			err = -EIO;
		}
	}
	brelse(bh);

	return err;
}
struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
			 bool hidden_inode)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);
	int err;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (UDF_I(inode)->i_hidden != hidden_inode) {
			iput(inode);
			return ERR_PTR(-EFSCORRUPTED);
		}
		return inode;
	}

	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
	err = udf_read_inode(inode, hidden_inode);
	if (err < 0) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);

	return inode;
}
int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
			    struct extent_position *epos)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	struct allocExtDesc *aed;
	struct extent_position nepos;
	struct kernel_lb_addr neloc;
	int ver, adsize;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	neloc.logicalBlockNum = block;
	neloc.partitionReferenceNum = epos->block.partitionReferenceNum;

	bh = sb_getblk(sb, udf_get_lb_pblock(sb, &neloc, 0));
	if (!bh)
		return -EIO;
	lock_buffer(bh);
	memset(bh->b_data, 0x00, sb->s_blocksize);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty_inode(bh, inode);

	aed = (struct allocExtDesc *)(bh->b_data);
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
		aed->previousAllocExtLocation =
				cpu_to_le32(epos->block.logicalBlockNum);
	}
	aed->lengthAllocDescs = cpu_to_le32(0);
	if (UDF_SB(sb)->s_udfrev >= 0x0200)
		ver = 3;
	else
		ver = 2;
	udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
		    sizeof(struct tag));

	nepos.block = neloc;
	nepos.offset = sizeof(struct allocExtDesc);
	nepos.bh = bh;

	/*
	 * Do we have to copy current last extent to make space for indirect
	 * one?
	 */
	if (epos->offset + adsize > sb->s_blocksize) {
		struct kernel_lb_addr cp_loc;
		uint32_t cp_len;
		int cp_type;

		epos->offset -= adsize;
		cp_type = udf_current_aext(inode, epos, &cp_loc, &cp_len, 0);
		cp_len |= ((uint32_t)cp_type) << 30;

		__udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
		udf_write_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
	} else {
		__udf_add_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
	}

	brelse(epos->bh);
	*epos = nepos;

	return 0;
}
/*
 * Append extent at the given position - should be the first free one in inode
 * / indirect extent. This function assumes there is enough space in the inode
 * or indirect extent. Use udf_add_aext() if you didn't check for this before.
 */
int __udf_add_aext(struct inode *inode, struct extent_position *epos,
		   struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct allocExtDesc *aed;
	int adsize;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (!epos->bh) {
		WARN_ON(iinfo->i_lenAlloc !=
			epos->offset - udf_file_entry_alloc_offset(inode));
	} else {
		aed = (struct allocExtDesc *)epos->bh->b_data;
		WARN_ON(le32_to_cpu(aed->lengthAllocDescs) !=
			epos->offset - sizeof(struct allocExtDesc));
		WARN_ON(epos->offset + adsize > inode->i_sb->s_blocksize);
	}

	udf_write_aext(inode, epos, eloc, elen, inc);

	if (!epos->bh) {
		iinfo->i_lenAlloc += adsize;
		mark_inode_dirty(inode);
	} else {
		aed = (struct allocExtDesc *)epos->bh->b_data;
		le32_add_cpu(&aed->lengthAllocDescs, adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
			udf_update_tag(epos->bh->b_data,
				       epos->offset + (inc ? 0 : adsize));
		else
			udf_update_tag(epos->bh->b_data,
				       sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(epos->bh, inode);
	}

	return 0;
}
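/*
 * Bookkeeping note for the helper above (illustrative restatement, not
 * normative): when the descriptors live inside the ICB the function keeps
 *
 *	iinfo->i_lenAlloc == epos->offset - udf_file_entry_alloc_offset(inode)
 *
 * and when they live in an allocation extent block it instead keeps
 *
 *	le32_to_cpu(aed->lengthAllocDescs) ==
 *				epos->offset - sizeof(struct allocExtDesc)
 *
 * which is exactly what the WARN_ON()s above assert before writing.
 */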
/*
 * Append extent at given position - should be the first free one in inode
 * / indirect extent. Takes care of allocating and linking indirect blocks.
 */
int udf_add_aext(struct inode *inode, struct extent_position *epos,
		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	struct super_block *sb = inode->i_sb;
	int adsize;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (epos->offset + (2 * adsize) > sb->s_blocksize) {
		int err;
		udf_pblk_t new_block;

		new_block = udf_new_block(sb, NULL,
					  epos->block.partitionReferenceNum,
					  epos->block.logicalBlockNum, &err);
		if (!new_block)
			return -ENOSPC;

		err = udf_setup_indirect_aext(inode, new_block, epos);
		if (err)
			return err;
	}

	return __udf_add_aext(inode, epos, eloc, elen, inc);
}
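/*
 * Typical use, sketched (illustrative only; "epos" is assumed to point at
 * the first free descriptor slot, e.g. after walking the chain with
 * udf_next_aext(), and "newblock" at a freshly allocated logical block):
 *
 *	struct kernel_lb_addr neloc = {
 *		.logicalBlockNum = newblock,
 *		.partitionReferenceNum = epos.block.partitionReferenceNum,
 *	};
 *	int err = udf_add_aext(inode, &epos, &neloc,
 *			       inode->i_sb->s_blocksize, 1);
 */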
void udf_write_aext(struct inode *inode, struct extent_position *epos,
		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr = iinfo->i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = (struct short_ad *)ptr;
		sad->extLength = cpu_to_le32(elen);
		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
		adsize = sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = (struct long_ad *)ptr;
		lad->extLength = cpu_to_le32(elen);
		lad->extLocation = cpu_to_lelb(*eloc);
		memset(lad->impUse, 0x00, sizeof(lad->impUse));
		adsize = sizeof(struct long_ad);
		break;
	default:
		return;
	}

	if (epos->bh) {
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
			struct allocExtDesc *aed =
				(struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				       le32_to_cpu(aed->lengthAllocDescs) +
				       sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	} else {
		mark_inode_dirty(inode);
	}

	if (inc)
		epos->offset += adsize;
}
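/*
 * Note on the encoding written above: the on-disk extent length keeps the
 * extent type in its two most significant bits, so a caller that wants to
 * rewrite an existing extent typically packs it as (illustrative sketch
 * only):
 *
 *	udf_write_aext(inode, &epos, &eloc,
 *		       ((uint32_t)etype << 30) | elen, 1);
 */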
/*
 * Only 1 indirect extent in a row really makes sense but allow up to 16 in
 * case someone does some weird stuff.
 */
#define UDF_MAX_INDIR_EXTS 16

int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int8_t etype;
	unsigned int indirections = 0;

	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
	       (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
		udf_pblk_t block;

		if (++indirections > UDF_MAX_INDIR_EXTS) {
			udf_err(inode->i_sb,
				"too many indirect extents in inode %lu\n",
				inode->i_ino);
			return -1;
		}

		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
		epos->bh = sb_bread(inode->i_sb, block);
		if (!epos->bh) {
			udf_debug("reading block %u failed!\n", block);
			return -1;
		}
	}

	return etype;
}
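/*
 * A sketch of the usual iteration pattern over an inode's extents
 * (illustrative only; the position may pick up a buffer head while crossing
 * indirect extents, so callers must brelse() it when done):
 *
 *	struct extent_position epos = { NULL, 0, UDF_I(inode)->i_location };
 *	struct kernel_lb_addr eloc;
 *	uint32_t elen;
 *	int8_t etype;
 *
 *	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
 *		... process one extent described by eloc/elen/etype ...
 *	}
 *	brelse(epos.bh);
 */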
int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh) {
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr = iinfo->i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
		alen = udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenAlloc;
	} else {
		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		alen = sizeof(struct allocExtDesc) +
			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
							lengthAllocDescs);
	}

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
		if (!sad)
			return -1;
		etype = le32_to_cpu(sad->extLength) >> 30;
		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
		eloc->partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
		if (!lad)
			return -1;
		etype = le32_to_cpu(lad->extLength) >> 30;
		*eloc = lelb_to_cpu(lad->extLocation);
		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	default:
		udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
		return -1;
	}

	return etype;
}
static int udf_insert_aext(struct inode *inode, struct extent_position epos,
			   struct kernel_lb_addr neloc, uint32_t nelen)
{
	struct kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;
	int err;

	if (epos.bh)
		get_bh(epos.bh);

	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
		udf_write_aext(inode, &epos, &neloc, nelen, 1);
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}

	err = udf_add_aext(inode, &epos, &neloc, nelen, 1);
	brelse(epos.bh);

	return err;
}
int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
{
	struct extent_position oepos;
	int adsize;
	int8_t etype;
	struct allocExtDesc *aed;
	struct udf_inode_info *iinfo;
	struct kernel_lb_addr eloc;
	uint32_t elen;

	if (epos.bh) {
		get_bh(epos.bh);
		get_bh(epos.bh);
	}

	iinfo = UDF_I(inode);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		adsize = 0;

	oepos = epos;
	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
		return -1;

	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
		if (oepos.bh != epos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = epos.offset - adsize;
		}
	}
	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
	elen = 0;

	if (epos.bh != oepos.bh) {
		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= (adsize * 2);
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       oepos.offset - (2 * adsize));
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	} else {
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= adsize;
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       epos.offset - adsize);
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

	return (elen >> 30);
}
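/*
 * The deletion above works by shifting every following descriptor one slot
 * back (written through "oepos" while "epos" runs one entry ahead) and then
 * clearing the slot(s) that became duplicated at the end. A condensed sketch
 * of the idea (illustrative only):
 *
 *	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
 *		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
 *	... then zero the freed slot(s) and shrink i_lenAlloc or
 *	    lengthAllocDescs accordingly ...
 */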
int8_t inode_bmap(struct inode *inode, sector_t block,
		  struct extent_position *pos, struct kernel_lb_addr *eloc,
		  uint32_t *elen, sector_t *offset)
{
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
	int8_t etype;
	struct udf_inode_info *iinfo;

	iinfo = UDF_I(inode);
	if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
		pos->offset = 0;
		pos->block = iinfo->i_location;
		pos->bh = NULL;
	}
	*elen = 0;
	do {
		etype = udf_next_aext(inode, pos, eloc, elen, 1);
		if (etype == -1) {
			*offset = (bcount - lbcount) >> blocksize_bits;
			iinfo->i_lenExtents = lbcount;
			return -1;
		}
		lbcount += *elen;
	} while (lbcount <= bcount);
	/* update extent cache */
	udf_update_extent_cache(inode, lbcount - *elen, pos);
	*offset = (bcount + *elen - lbcount) >> blocksize_bits;