/*
 * inode.c
 *
 * PURPOSE
 *  Inode handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  This file is distributed under the terms of the GNU General Public
 *  License (GPL). Copies of the GPL can be obtained from:
 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
 *  Each contributing author retains all rights to their own work.
 *
 *  (C) 1998 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/04/98 dgb  Added rudimentary directory functions
 *  10/07/98      Fully working udf_block_map! It works!
 *  11/25/98      bmap altered to better support extents
 *  12/06/98 blf  partition support in udf_iget, udf_block_map
 *  12/12/98      rewrote udf_block_map to handle next extents and descs
 *                across block boundaries (which is not actually allowed)
 *  12/20/98      added support for strategy 4096
 *  03/07/99      rewrote udf_block_map (again)
 *                New funcs, inode_bmap, udf_next_aext
 *  04/19/99      Support for writing device EA's for major/minor #
 */
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/writeback.h>
37 #include <linux/slab.h>
38 #include <linux/crc-itu-t.h>
39 #include <linux/mpage.h>
40 #include <linux/uio.h>
41 #include <linux/bio.h>
46 #define EXTENT_MERGE_SIZE 5
48 static umode_t
udf_convert_permissions(struct fileEntry
*);
49 static int udf_update_inode(struct inode
*, int);
50 static int udf_sync_inode(struct inode
*inode
);
51 static int udf_alloc_i_data(struct inode
*inode
, size_t size
);
52 static sector_t
inode_getblk(struct inode
*, sector_t
, int *, int *);
53 static int8_t udf_insert_aext(struct inode
*, struct extent_position
,
54 struct kernel_lb_addr
, uint32_t);
55 static void udf_split_extents(struct inode
*, int *, int, int,
56 struct kernel_long_ad
*, int *);
57 static void udf_prealloc_extents(struct inode
*, int, int,
58 struct kernel_long_ad
*, int *);
59 static void udf_merge_extents(struct inode
*, struct kernel_long_ad
*, int *);
60 static void udf_update_extents(struct inode
*, struct kernel_long_ad
*, int,
61 int, struct extent_position
*);
62 static int udf_get_block(struct inode
*, sector_t
, struct buffer_head
*, int);
64 static void __udf_clear_extent_cache(struct inode
*inode
)
66 struct udf_inode_info
*iinfo
= UDF_I(inode
);
68 if (iinfo
->cached_extent
.lstart
!= -1) {
69 brelse(iinfo
->cached_extent
.epos
.bh
);
70 iinfo
->cached_extent
.lstart
= -1;
74 /* Invalidate extent cache */
75 static void udf_clear_extent_cache(struct inode
*inode
)
77 struct udf_inode_info
*iinfo
= UDF_I(inode
);
79 spin_lock(&iinfo
->i_extent_cache_lock
);
80 __udf_clear_extent_cache(inode
);
81 spin_unlock(&iinfo
->i_extent_cache_lock
);
84 /* Return contents of extent cache */
85 static int udf_read_extent_cache(struct inode
*inode
, loff_t bcount
,
86 loff_t
*lbcount
, struct extent_position
*pos
)
88 struct udf_inode_info
*iinfo
= UDF_I(inode
);
91 spin_lock(&iinfo
->i_extent_cache_lock
);
92 if ((iinfo
->cached_extent
.lstart
<= bcount
) &&
93 (iinfo
->cached_extent
.lstart
!= -1)) {
95 *lbcount
= iinfo
->cached_extent
.lstart
;
96 memcpy(pos
, &iinfo
->cached_extent
.epos
,
97 sizeof(struct extent_position
));
102 spin_unlock(&iinfo
->i_extent_cache_lock
);
106 /* Add extent to extent cache */
107 static void udf_update_extent_cache(struct inode
*inode
, loff_t estart
,
108 struct extent_position
*pos
)
110 struct udf_inode_info
*iinfo
= UDF_I(inode
);
112 spin_lock(&iinfo
->i_extent_cache_lock
);
113 /* Invalidate previously cached extent */
114 __udf_clear_extent_cache(inode
);
117 memcpy(&iinfo
->cached_extent
.epos
, pos
, sizeof(*pos
));
118 iinfo
->cached_extent
.lstart
= estart
;
119 switch (iinfo
->i_alloc_type
) {
120 case ICBTAG_FLAG_AD_SHORT
:
121 iinfo
->cached_extent
.epos
.offset
-= sizeof(struct short_ad
);
123 case ICBTAG_FLAG_AD_LONG
:
124 iinfo
->cached_extent
.epos
.offset
-= sizeof(struct long_ad
);
127 spin_unlock(&iinfo
->i_extent_cache_lock
);
130 void udf_evict_inode(struct inode
*inode
)
132 struct udf_inode_info
*iinfo
= UDF_I(inode
);
135 if (!is_bad_inode(inode
)) {
136 if (!inode
->i_nlink
) {
138 udf_setsize(inode
, 0);
139 udf_update_inode(inode
, IS_SYNC(inode
));
141 if (iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_IN_ICB
&&
142 inode
->i_size
!= iinfo
->i_lenExtents
) {
143 udf_warn(inode
->i_sb
,
144 "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
145 inode
->i_ino
, inode
->i_mode
,
146 (unsigned long long)inode
->i_size
,
147 (unsigned long long)iinfo
->i_lenExtents
);
150 truncate_inode_pages_final(&inode
->i_data
);
151 invalidate_inode_buffers(inode
);
153 kfree(iinfo
->i_ext
.i_data
);
154 iinfo
->i_ext
.i_data
= NULL
;
155 udf_clear_extent_cache(inode
);
157 udf_free_inode(inode
);
161 static void udf_write_failed(struct address_space
*mapping
, loff_t to
)
163 struct inode
*inode
= mapping
->host
;
164 struct udf_inode_info
*iinfo
= UDF_I(inode
);
165 loff_t isize
= inode
->i_size
;
168 truncate_pagecache(inode
, isize
);
169 if (iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_IN_ICB
) {
170 down_write(&iinfo
->i_data_sem
);
171 udf_clear_extent_cache(inode
);
172 udf_truncate_extents(inode
);
173 up_write(&iinfo
->i_data_sem
);
178 static int udf_writepage(struct page
*page
, struct writeback_control
*wbc
)
180 return block_write_full_page(page
, udf_get_block
, wbc
);
183 static int udf_writepages(struct address_space
*mapping
,
184 struct writeback_control
*wbc
)
186 return mpage_writepages(mapping
, wbc
, udf_get_block
);
189 static int udf_readpage(struct file
*file
, struct page
*page
)
191 return mpage_readpage(page
, udf_get_block
);
194 static int udf_readpages(struct file
*file
, struct address_space
*mapping
,
195 struct list_head
*pages
, unsigned nr_pages
)
197 return mpage_readpages(mapping
, pages
, nr_pages
, udf_get_block
);
200 static int udf_write_begin(struct file
*file
, struct address_space
*mapping
,
201 loff_t pos
, unsigned len
, unsigned flags
,
202 struct page
**pagep
, void **fsdata
)
206 ret
= block_write_begin(mapping
, pos
, len
, flags
, pagep
, udf_get_block
);
208 udf_write_failed(mapping
, pos
+ len
);
212 static ssize_t
udf_direct_IO(struct kiocb
*iocb
, struct iov_iter
*iter
)
214 struct file
*file
= iocb
->ki_filp
;
215 struct address_space
*mapping
= file
->f_mapping
;
216 struct inode
*inode
= mapping
->host
;
217 size_t count
= iov_iter_count(iter
);
220 ret
= blockdev_direct_IO(iocb
, inode
, iter
, udf_get_block
);
221 if (unlikely(ret
< 0 && iov_iter_rw(iter
) == WRITE
))
222 udf_write_failed(mapping
, iocb
->ki_pos
+ count
);
226 static sector_t
udf_bmap(struct address_space
*mapping
, sector_t block
)
228 return generic_block_bmap(mapping
, block
, udf_get_block
);
231 const struct address_space_operations udf_aops
= {
232 .readpage
= udf_readpage
,
233 .readpages
= udf_readpages
,
234 .writepage
= udf_writepage
,
235 .writepages
= udf_writepages
,
236 .write_begin
= udf_write_begin
,
237 .write_end
= generic_write_end
,
238 .direct_IO
= udf_direct_IO
,
243 * Expand file stored in ICB to a normal one-block-file
245 * This function requires i_data_sem for writing and releases it.
246 * This function requires i_mutex held
248 int udf_expand_file_adinicb(struct inode
*inode
)
252 struct udf_inode_info
*iinfo
= UDF_I(inode
);
254 struct writeback_control udf_wbc
= {
255 .sync_mode
= WB_SYNC_NONE
,
259 WARN_ON_ONCE(!inode_is_locked(inode
));
260 if (!iinfo
->i_lenAlloc
) {
261 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
262 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_SHORT
;
264 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_LONG
;
265 /* from now on we have normal address_space methods */
266 inode
->i_data
.a_ops
= &udf_aops
;
267 up_write(&iinfo
->i_data_sem
);
268 mark_inode_dirty(inode
);
272 * Release i_data_sem so that we can lock a page - page lock ranks
273 * above i_data_sem. i_mutex still protects us against file changes.
275 up_write(&iinfo
->i_data_sem
);
277 page
= find_or_create_page(inode
->i_mapping
, 0, GFP_NOFS
);
281 if (!PageUptodate(page
)) {
282 kaddr
= kmap_atomic(page
);
283 memset(kaddr
+ iinfo
->i_lenAlloc
, 0x00,
284 PAGE_SIZE
- iinfo
->i_lenAlloc
);
285 memcpy(kaddr
, iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
,
287 flush_dcache_page(page
);
288 SetPageUptodate(page
);
289 kunmap_atomic(kaddr
);
291 down_write(&iinfo
->i_data_sem
);
292 memset(iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
, 0x00,
294 iinfo
->i_lenAlloc
= 0;
295 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
296 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_SHORT
;
298 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_LONG
;
299 /* from now on we have normal address_space methods */
300 inode
->i_data
.a_ops
= &udf_aops
;
301 up_write(&iinfo
->i_data_sem
);
302 err
= inode
->i_data
.a_ops
->writepage(page
, &udf_wbc
);
304 /* Restore everything back so that we don't lose data... */
306 down_write(&iinfo
->i_data_sem
);
307 kaddr
= kmap_atomic(page
);
308 memcpy(iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
, kaddr
,
310 kunmap_atomic(kaddr
);
312 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_IN_ICB
;
313 inode
->i_data
.a_ops
= &udf_adinicb_aops
;
314 up_write(&iinfo
->i_data_sem
);
317 mark_inode_dirty(inode
);
322 struct buffer_head
*udf_expand_dir_adinicb(struct inode
*inode
, int *block
,
326 struct buffer_head
*dbh
= NULL
;
327 struct kernel_lb_addr eloc
;
329 struct extent_position epos
;
331 struct udf_fileident_bh sfibh
, dfibh
;
332 loff_t f_pos
= udf_ext0_offset(inode
);
333 int size
= udf_ext0_offset(inode
) + inode
->i_size
;
334 struct fileIdentDesc cfi
, *sfi
, *dfi
;
335 struct udf_inode_info
*iinfo
= UDF_I(inode
);
337 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
338 alloctype
= ICBTAG_FLAG_AD_SHORT
;
340 alloctype
= ICBTAG_FLAG_AD_LONG
;
342 if (!inode
->i_size
) {
343 iinfo
->i_alloc_type
= alloctype
;
344 mark_inode_dirty(inode
);
348 /* alloc block, and copy data to it */
349 *block
= udf_new_block(inode
->i_sb
, inode
,
350 iinfo
->i_location
.partitionReferenceNum
,
351 iinfo
->i_location
.logicalBlockNum
, err
);
354 newblock
= udf_get_pblock(inode
->i_sb
, *block
,
355 iinfo
->i_location
.partitionReferenceNum
,
359 dbh
= udf_tgetblk(inode
->i_sb
, newblock
);
363 memset(dbh
->b_data
, 0x00, inode
->i_sb
->s_blocksize
);
364 set_buffer_uptodate(dbh
);
366 mark_buffer_dirty_inode(dbh
, inode
);
368 sfibh
.soffset
= sfibh
.eoffset
=
369 f_pos
& (inode
->i_sb
->s_blocksize
- 1);
370 sfibh
.sbh
= sfibh
.ebh
= NULL
;
371 dfibh
.soffset
= dfibh
.eoffset
= 0;
372 dfibh
.sbh
= dfibh
.ebh
= dbh
;
373 while (f_pos
< size
) {
374 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_IN_ICB
;
375 sfi
= udf_fileident_read(inode
, &f_pos
, &sfibh
, &cfi
, NULL
,
381 iinfo
->i_alloc_type
= alloctype
;
382 sfi
->descTag
.tagLocation
= cpu_to_le32(*block
);
383 dfibh
.soffset
= dfibh
.eoffset
;
384 dfibh
.eoffset
+= (sfibh
.eoffset
- sfibh
.soffset
);
385 dfi
= (struct fileIdentDesc
*)(dbh
->b_data
+ dfibh
.soffset
);
386 if (udf_write_fi(inode
, sfi
, dfi
, &dfibh
, sfi
->impUse
,
388 le16_to_cpu(sfi
->lengthOfImpUse
))) {
389 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_IN_ICB
;
394 mark_buffer_dirty_inode(dbh
, inode
);
396 memset(iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
, 0,
398 iinfo
->i_lenAlloc
= 0;
399 eloc
.logicalBlockNum
= *block
;
400 eloc
.partitionReferenceNum
=
401 iinfo
->i_location
.partitionReferenceNum
;
402 iinfo
->i_lenExtents
= inode
->i_size
;
404 epos
.block
= iinfo
->i_location
;
405 epos
.offset
= udf_file_entry_alloc_offset(inode
);
406 udf_add_aext(inode
, &epos
, &eloc
, inode
->i_size
, 0);
410 mark_inode_dirty(inode
);
414 static int udf_get_block(struct inode
*inode
, sector_t block
,
415 struct buffer_head
*bh_result
, int create
)
419 struct udf_inode_info
*iinfo
;
422 phys
= udf_block_map(inode
, block
);
424 map_bh(bh_result
, inode
->i_sb
, phys
);
430 iinfo
= UDF_I(inode
);
432 down_write(&iinfo
->i_data_sem
);
433 if (block
== iinfo
->i_next_alloc_block
+ 1) {
434 iinfo
->i_next_alloc_block
++;
435 iinfo
->i_next_alloc_goal
++;
438 udf_clear_extent_cache(inode
);
439 phys
= inode_getblk(inode
, block
, &err
, &new);
444 set_buffer_new(bh_result
);
445 map_bh(bh_result
, inode
->i_sb
, phys
);
448 up_write(&iinfo
->i_data_sem
);
452 static struct buffer_head
*udf_getblk(struct inode
*inode
, long block
,
453 int create
, int *err
)
455 struct buffer_head
*bh
;
456 struct buffer_head dummy
;
459 dummy
.b_blocknr
= -1000;
460 *err
= udf_get_block(inode
, block
, &dummy
, create
);
461 if (!*err
&& buffer_mapped(&dummy
)) {
462 bh
= sb_getblk(inode
->i_sb
, dummy
.b_blocknr
);
463 if (buffer_new(&dummy
)) {
465 memset(bh
->b_data
, 0x00, inode
->i_sb
->s_blocksize
);
466 set_buffer_uptodate(bh
);
468 mark_buffer_dirty_inode(bh
, inode
);
476 /* Extend the file with new blocks totaling 'new_block_bytes',
477 * return the number of extents added
479 static int udf_do_extend_file(struct inode
*inode
,
480 struct extent_position
*last_pos
,
481 struct kernel_long_ad
*last_ext
,
482 loff_t new_block_bytes
)
485 int count
= 0, fake
= !(last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
);
486 struct super_block
*sb
= inode
->i_sb
;
487 struct kernel_lb_addr prealloc_loc
= {};
488 int prealloc_len
= 0;
489 struct udf_inode_info
*iinfo
;
492 /* The previous extent is fake and we should not extend by anything
493 * - there's nothing to do... */
494 if (!new_block_bytes
&& fake
)
497 iinfo
= UDF_I(inode
);
498 /* Round the last extent up to a multiple of block size */
499 if (last_ext
->extLength
& (sb
->s_blocksize
- 1)) {
500 last_ext
->extLength
=
501 (last_ext
->extLength
& UDF_EXTENT_FLAG_MASK
) |
502 (((last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
503 sb
->s_blocksize
- 1) & ~(sb
->s_blocksize
- 1));
504 iinfo
->i_lenExtents
=
505 (iinfo
->i_lenExtents
+ sb
->s_blocksize
- 1) &
506 ~(sb
->s_blocksize
- 1);
509 /* Last extent are just preallocated blocks? */
510 if ((last_ext
->extLength
& UDF_EXTENT_FLAG_MASK
) ==
511 EXT_NOT_RECORDED_ALLOCATED
) {
512 /* Save the extent so that we can reattach it to the end */
513 prealloc_loc
= last_ext
->extLocation
;
514 prealloc_len
= last_ext
->extLength
;
515 /* Mark the extent as a hole */
516 last_ext
->extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
|
517 (last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
);
518 last_ext
->extLocation
.logicalBlockNum
= 0;
519 last_ext
->extLocation
.partitionReferenceNum
= 0;
522 /* Can we merge with the previous extent? */
523 if ((last_ext
->extLength
& UDF_EXTENT_FLAG_MASK
) ==
524 EXT_NOT_RECORDED_NOT_ALLOCATED
) {
525 add
= (1 << 30) - sb
->s_blocksize
-
526 (last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
);
527 if (add
> new_block_bytes
)
528 add
= new_block_bytes
;
529 new_block_bytes
-= add
;
530 last_ext
->extLength
+= add
;
534 udf_add_aext(inode
, last_pos
, &last_ext
->extLocation
,
535 last_ext
->extLength
, 1);
538 struct kernel_lb_addr tmploc
;
541 udf_write_aext(inode
, last_pos
, &last_ext
->extLocation
,
542 last_ext
->extLength
, 1);
544 * We've rewritten the last extent but there may be empty
545 * indirect extent after it - enter it.
547 udf_next_aext(inode
, last_pos
, &tmploc
, &tmplen
, 0);
550 /* Managed to do everything necessary? */
551 if (!new_block_bytes
)
554 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
555 last_ext
->extLocation
.logicalBlockNum
= 0;
556 last_ext
->extLocation
.partitionReferenceNum
= 0;
557 add
= (1 << 30) - sb
->s_blocksize
;
558 last_ext
->extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
| add
;
560 /* Create enough extents to cover the whole hole */
561 while (new_block_bytes
> add
) {
562 new_block_bytes
-= add
;
563 err
= udf_add_aext(inode
, last_pos
, &last_ext
->extLocation
,
564 last_ext
->extLength
, 1);
569 if (new_block_bytes
) {
570 last_ext
->extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
|
572 err
= udf_add_aext(inode
, last_pos
, &last_ext
->extLocation
,
573 last_ext
->extLength
, 1);
580 /* Do we have some preallocated blocks saved? */
582 err
= udf_add_aext(inode
, last_pos
, &prealloc_loc
,
586 last_ext
->extLocation
= prealloc_loc
;
587 last_ext
->extLength
= prealloc_len
;
591 /* last_pos should point to the last written extent... */
592 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
593 last_pos
->offset
-= sizeof(struct short_ad
);
594 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
595 last_pos
->offset
-= sizeof(struct long_ad
);
602 /* Extend the final block of the file to final_block_len bytes */
603 static void udf_do_extend_final_block(struct inode
*inode
,
604 struct extent_position
*last_pos
,
605 struct kernel_long_ad
*last_ext
,
606 uint32_t final_block_len
)
608 struct super_block
*sb
= inode
->i_sb
;
609 uint32_t added_bytes
;
611 added_bytes
= final_block_len
-
612 (last_ext
->extLength
& (sb
->s_blocksize
- 1));
613 last_ext
->extLength
+= added_bytes
;
614 UDF_I(inode
)->i_lenExtents
+= added_bytes
;
616 udf_write_aext(inode
, last_pos
, &last_ext
->extLocation
,
617 last_ext
->extLength
, 1);
620 static int udf_extend_file(struct inode
*inode
, loff_t newsize
)
623 struct extent_position epos
;
624 struct kernel_lb_addr eloc
;
627 struct super_block
*sb
= inode
->i_sb
;
628 sector_t first_block
= newsize
>> sb
->s_blocksize_bits
, offset
;
629 unsigned long partial_final_block
;
631 struct udf_inode_info
*iinfo
= UDF_I(inode
);
632 struct kernel_long_ad extent
;
634 int within_final_block
;
636 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
637 adsize
= sizeof(struct short_ad
);
638 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
639 adsize
= sizeof(struct long_ad
);
643 etype
= inode_bmap(inode
, first_block
, &epos
, &eloc
, &elen
, &offset
);
644 within_final_block
= (etype
!= -1);
646 if ((!epos
.bh
&& epos
.offset
== udf_file_entry_alloc_offset(inode
)) ||
647 (epos
.bh
&& epos
.offset
== sizeof(struct allocExtDesc
))) {
648 /* File has no extents at all or has empty last
649 * indirect extent! Create a fake extent... */
650 extent
.extLocation
.logicalBlockNum
= 0;
651 extent
.extLocation
.partitionReferenceNum
= 0;
652 extent
.extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
;
654 epos
.offset
-= adsize
;
655 etype
= udf_next_aext(inode
, &epos
, &extent
.extLocation
,
656 &extent
.extLength
, 0);
657 extent
.extLength
|= etype
<< 30;
660 partial_final_block
= newsize
& (sb
->s_blocksize
- 1);
662 /* File has extent covering the new size (could happen when extending
665 if (within_final_block
) {
666 /* Extending file within the last file block */
667 udf_do_extend_final_block(inode
, &epos
, &extent
,
668 partial_final_block
);
670 loff_t add
= ((loff_t
)offset
<< sb
->s_blocksize_bits
) |
672 err
= udf_do_extend_file(inode
, &epos
, &extent
, add
);
678 iinfo
->i_lenExtents
= newsize
;
684 static sector_t
inode_getblk(struct inode
*inode
, sector_t block
,
687 struct kernel_long_ad laarr
[EXTENT_MERGE_SIZE
];
688 struct extent_position prev_epos
, cur_epos
, next_epos
;
689 int count
= 0, startnum
= 0, endnum
= 0;
690 uint32_t elen
= 0, tmpelen
;
691 struct kernel_lb_addr eloc
, tmpeloc
;
693 loff_t lbcount
= 0, b_off
= 0;
694 uint32_t newblocknum
, newblock
;
697 struct udf_inode_info
*iinfo
= UDF_I(inode
);
698 int goal
= 0, pgoal
= iinfo
->i_location
.logicalBlockNum
;
704 prev_epos
.offset
= udf_file_entry_alloc_offset(inode
);
705 prev_epos
.block
= iinfo
->i_location
;
707 cur_epos
= next_epos
= prev_epos
;
708 b_off
= (loff_t
)block
<< inode
->i_sb
->s_blocksize_bits
;
710 /* find the extent which contains the block we are looking for.
711 alternate between laarr[0] and laarr[1] for locations of the
712 current extent, and the previous extent */
714 if (prev_epos
.bh
!= cur_epos
.bh
) {
715 brelse(prev_epos
.bh
);
717 prev_epos
.bh
= cur_epos
.bh
;
719 if (cur_epos
.bh
!= next_epos
.bh
) {
721 get_bh(next_epos
.bh
);
722 cur_epos
.bh
= next_epos
.bh
;
727 prev_epos
.block
= cur_epos
.block
;
728 cur_epos
.block
= next_epos
.block
;
730 prev_epos
.offset
= cur_epos
.offset
;
731 cur_epos
.offset
= next_epos
.offset
;
733 etype
= udf_next_aext(inode
, &next_epos
, &eloc
, &elen
, 1);
739 laarr
[c
].extLength
= (etype
<< 30) | elen
;
740 laarr
[c
].extLocation
= eloc
;
742 if (etype
!= (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30))
743 pgoal
= eloc
.logicalBlockNum
+
744 ((elen
+ inode
->i_sb
->s_blocksize
- 1) >>
745 inode
->i_sb
->s_blocksize_bits
);
748 } while (lbcount
+ elen
<= b_off
);
751 offset
= b_off
>> inode
->i_sb
->s_blocksize_bits
;
753 * Move prev_epos and cur_epos into indirect extent if we are at
756 udf_next_aext(inode
, &prev_epos
, &tmpeloc
, &tmpelen
, 0);
757 udf_next_aext(inode
, &cur_epos
, &tmpeloc
, &tmpelen
, 0);
759 /* if the extent is allocated and recorded, return the block
760 if the extent is not a multiple of the blocksize, round up */
762 if (etype
== (EXT_RECORDED_ALLOCATED
>> 30)) {
763 if (elen
& (inode
->i_sb
->s_blocksize
- 1)) {
764 elen
= EXT_RECORDED_ALLOCATED
|
765 ((elen
+ inode
->i_sb
->s_blocksize
- 1) &
766 ~(inode
->i_sb
->s_blocksize
- 1));
767 udf_write_aext(inode
, &cur_epos
, &eloc
, elen
, 1);
769 newblock
= udf_get_lb_pblock(inode
->i_sb
, &eloc
, offset
);
773 /* Are we beyond EOF? */
783 /* Create a fake extent when there's not one */
784 memset(&laarr
[0].extLocation
, 0x00,
785 sizeof(struct kernel_lb_addr
));
786 laarr
[0].extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
;
787 /* Will udf_do_extend_file() create real extent from
789 startnum
= (offset
> 0);
791 /* Create extents for the hole between EOF and offset */
792 hole_len
= (loff_t
)offset
<< inode
->i_blkbits
;
793 ret
= udf_do_extend_file(inode
, &prev_epos
, laarr
, hole_len
);
802 /* We are not covered by a preallocated extent? */
803 if ((laarr
[0].extLength
& UDF_EXTENT_FLAG_MASK
) !=
804 EXT_NOT_RECORDED_ALLOCATED
) {
805 /* Is there any real extent? - otherwise we overwrite
809 laarr
[c
].extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
|
810 inode
->i_sb
->s_blocksize
;
811 memset(&laarr
[c
].extLocation
, 0x00,
812 sizeof(struct kernel_lb_addr
));
819 endnum
= startnum
= ((count
> 2) ? 2 : count
);
821 /* if the current extent is in position 0,
822 swap it with the previous */
823 if (!c
&& count
!= 1) {
830 /* if the current block is located in an extent,
831 read the next extent */
832 etype
= udf_next_aext(inode
, &next_epos
, &eloc
, &elen
, 0);
834 laarr
[c
+ 1].extLength
= (etype
<< 30) | elen
;
835 laarr
[c
+ 1].extLocation
= eloc
;
843 /* if the current extent is not recorded but allocated, get the
844 * block in the extent corresponding to the requested block */
845 if ((laarr
[c
].extLength
>> 30) == (EXT_NOT_RECORDED_ALLOCATED
>> 30))
846 newblocknum
= laarr
[c
].extLocation
.logicalBlockNum
+ offset
;
847 else { /* otherwise, allocate a new block */
848 if (iinfo
->i_next_alloc_block
== block
)
849 goal
= iinfo
->i_next_alloc_goal
;
852 if (!(goal
= pgoal
)) /* XXX: what was intended here? */
853 goal
= iinfo
->i_location
.logicalBlockNum
+ 1;
856 newblocknum
= udf_new_block(inode
->i_sb
, inode
,
857 iinfo
->i_location
.partitionReferenceNum
,
865 iinfo
->i_lenExtents
+= inode
->i_sb
->s_blocksize
;
868 /* if the extent the requsted block is located in contains multiple
869 * blocks, split the extent into at most three extents. blocks prior
870 * to requested block, requested block, and blocks after requested
872 udf_split_extents(inode
, &c
, offset
, newblocknum
, laarr
, &endnum
);
874 /* We preallocate blocks only for regular files. It also makes sense
875 * for directories but there's a problem when to drop the
876 * preallocation. We might use some delayed work for that but I feel
877 * it's overengineering for a filesystem like UDF. */
878 if (S_ISREG(inode
->i_mode
))
879 udf_prealloc_extents(inode
, c
, lastblock
, laarr
, &endnum
);
881 /* merge any continuous blocks in laarr */
882 udf_merge_extents(inode
, laarr
, &endnum
);
884 /* write back the new extents, inserting new extents if the new number
885 * of extents is greater than the old number, and deleting extents if
886 * the new number of extents is less than the old number */
887 udf_update_extents(inode
, laarr
, startnum
, endnum
, &prev_epos
);
889 newblock
= udf_get_pblock(inode
->i_sb
, newblocknum
,
890 iinfo
->i_location
.partitionReferenceNum
, 0);
896 iinfo
->i_next_alloc_block
= block
;
897 iinfo
->i_next_alloc_goal
= newblocknum
;
898 inode
->i_ctime
= current_time(inode
);
901 udf_sync_inode(inode
);
903 mark_inode_dirty(inode
);
905 brelse(prev_epos
.bh
);
907 brelse(next_epos
.bh
);
911 static void udf_split_extents(struct inode
*inode
, int *c
, int offset
,
912 int newblocknum
, struct kernel_long_ad
*laarr
,
915 unsigned long blocksize
= inode
->i_sb
->s_blocksize
;
916 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
918 if ((laarr
[*c
].extLength
>> 30) == (EXT_NOT_RECORDED_ALLOCATED
>> 30) ||
919 (laarr
[*c
].extLength
>> 30) ==
920 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30)) {
922 int blen
= ((laarr
[curr
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
923 blocksize
- 1) >> blocksize_bits
;
924 int8_t etype
= (laarr
[curr
].extLength
>> 30);
928 else if (!offset
|| blen
== offset
+ 1) {
929 laarr
[curr
+ 2] = laarr
[curr
+ 1];
930 laarr
[curr
+ 1] = laarr
[curr
];
932 laarr
[curr
+ 3] = laarr
[curr
+ 1];
933 laarr
[curr
+ 2] = laarr
[curr
+ 1] = laarr
[curr
];
937 if (etype
== (EXT_NOT_RECORDED_ALLOCATED
>> 30)) {
938 udf_free_blocks(inode
->i_sb
, inode
,
939 &laarr
[curr
].extLocation
,
941 laarr
[curr
].extLength
=
942 EXT_NOT_RECORDED_NOT_ALLOCATED
|
943 (offset
<< blocksize_bits
);
944 laarr
[curr
].extLocation
.logicalBlockNum
= 0;
945 laarr
[curr
].extLocation
.
946 partitionReferenceNum
= 0;
948 laarr
[curr
].extLength
= (etype
<< 30) |
949 (offset
<< blocksize_bits
);
955 laarr
[curr
].extLocation
.logicalBlockNum
= newblocknum
;
956 if (etype
== (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30))
957 laarr
[curr
].extLocation
.partitionReferenceNum
=
958 UDF_I(inode
)->i_location
.partitionReferenceNum
;
959 laarr
[curr
].extLength
= EXT_RECORDED_ALLOCATED
|
963 if (blen
!= offset
+ 1) {
964 if (etype
== (EXT_NOT_RECORDED_ALLOCATED
>> 30))
965 laarr
[curr
].extLocation
.logicalBlockNum
+=
967 laarr
[curr
].extLength
= (etype
<< 30) |
968 ((blen
- (offset
+ 1)) << blocksize_bits
);
975 static void udf_prealloc_extents(struct inode
*inode
, int c
, int lastblock
,
976 struct kernel_long_ad
*laarr
,
979 int start
, length
= 0, currlength
= 0, i
;
981 if (*endnum
>= (c
+ 1)) {
987 if ((laarr
[c
+ 1].extLength
>> 30) ==
988 (EXT_NOT_RECORDED_ALLOCATED
>> 30)) {
990 length
= currlength
=
991 (((laarr
[c
+ 1].extLength
&
992 UDF_EXTENT_LENGTH_MASK
) +
993 inode
->i_sb
->s_blocksize
- 1) >>
994 inode
->i_sb
->s_blocksize_bits
);
999 for (i
= start
+ 1; i
<= *endnum
; i
++) {
1002 length
+= UDF_DEFAULT_PREALLOC_BLOCKS
;
1003 } else if ((laarr
[i
].extLength
>> 30) ==
1004 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30)) {
1005 length
+= (((laarr
[i
].extLength
&
1006 UDF_EXTENT_LENGTH_MASK
) +
1007 inode
->i_sb
->s_blocksize
- 1) >>
1008 inode
->i_sb
->s_blocksize_bits
);
1014 int next
= laarr
[start
].extLocation
.logicalBlockNum
+
1015 (((laarr
[start
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
1016 inode
->i_sb
->s_blocksize
- 1) >>
1017 inode
->i_sb
->s_blocksize_bits
);
1018 int numalloc
= udf_prealloc_blocks(inode
->i_sb
, inode
,
1019 laarr
[start
].extLocation
.partitionReferenceNum
,
1020 next
, (UDF_DEFAULT_PREALLOC_BLOCKS
> length
?
1021 length
: UDF_DEFAULT_PREALLOC_BLOCKS
) -
1024 if (start
== (c
+ 1))
1025 laarr
[start
].extLength
+=
1027 inode
->i_sb
->s_blocksize_bits
);
1029 memmove(&laarr
[c
+ 2], &laarr
[c
+ 1],
1030 sizeof(struct long_ad
) * (*endnum
- (c
+ 1)));
1032 laarr
[c
+ 1].extLocation
.logicalBlockNum
= next
;
1033 laarr
[c
+ 1].extLocation
.partitionReferenceNum
=
1034 laarr
[c
].extLocation
.
1035 partitionReferenceNum
;
1036 laarr
[c
+ 1].extLength
=
1037 EXT_NOT_RECORDED_ALLOCATED
|
1039 inode
->i_sb
->s_blocksize_bits
);
1043 for (i
= start
+ 1; numalloc
&& i
< *endnum
; i
++) {
1044 int elen
= ((laarr
[i
].extLength
&
1045 UDF_EXTENT_LENGTH_MASK
) +
1046 inode
->i_sb
->s_blocksize
- 1) >>
1047 inode
->i_sb
->s_blocksize_bits
;
1049 if (elen
> numalloc
) {
1050 laarr
[i
].extLength
-=
1052 inode
->i_sb
->s_blocksize_bits
);
1056 if (*endnum
> (i
+ 1))
1059 sizeof(struct long_ad
) *
1060 (*endnum
- (i
+ 1)));
1065 UDF_I(inode
)->i_lenExtents
+=
1066 numalloc
<< inode
->i_sb
->s_blocksize_bits
;
1071 static void udf_merge_extents(struct inode
*inode
, struct kernel_long_ad
*laarr
,
1075 unsigned long blocksize
= inode
->i_sb
->s_blocksize
;
1076 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
1078 for (i
= 0; i
< (*endnum
- 1); i
++) {
1079 struct kernel_long_ad
*li
/*l[i]*/ = &laarr
[i
];
1080 struct kernel_long_ad
*lip1
/*l[i plus 1]*/ = &laarr
[i
+ 1];
1082 if (((li
->extLength
>> 30) == (lip1
->extLength
>> 30)) &&
1083 (((li
->extLength
>> 30) ==
1084 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30)) ||
1085 ((lip1
->extLocation
.logicalBlockNum
-
1086 li
->extLocation
.logicalBlockNum
) ==
1087 (((li
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1088 blocksize
- 1) >> blocksize_bits
)))) {
1090 if (((li
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1091 (lip1
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1092 blocksize
- 1) & ~UDF_EXTENT_LENGTH_MASK
) {
1093 lip1
->extLength
= (lip1
->extLength
-
1095 UDF_EXTENT_LENGTH_MASK
) +
1096 UDF_EXTENT_LENGTH_MASK
) &
1098 li
->extLength
= (li
->extLength
&
1099 UDF_EXTENT_FLAG_MASK
) +
1100 (UDF_EXTENT_LENGTH_MASK
+ 1) -
1102 lip1
->extLocation
.logicalBlockNum
=
1103 li
->extLocation
.logicalBlockNum
+
1105 UDF_EXTENT_LENGTH_MASK
) >>
1108 li
->extLength
= lip1
->extLength
+
1110 UDF_EXTENT_LENGTH_MASK
) +
1111 blocksize
- 1) & ~(blocksize
- 1));
1112 if (*endnum
> (i
+ 2))
1113 memmove(&laarr
[i
+ 1], &laarr
[i
+ 2],
1114 sizeof(struct long_ad
) *
1115 (*endnum
- (i
+ 2)));
1119 } else if (((li
->extLength
>> 30) ==
1120 (EXT_NOT_RECORDED_ALLOCATED
>> 30)) &&
1121 ((lip1
->extLength
>> 30) ==
1122 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30))) {
1123 udf_free_blocks(inode
->i_sb
, inode
, &li
->extLocation
, 0,
1125 UDF_EXTENT_LENGTH_MASK
) +
1126 blocksize
- 1) >> blocksize_bits
);
1127 li
->extLocation
.logicalBlockNum
= 0;
1128 li
->extLocation
.partitionReferenceNum
= 0;
1130 if (((li
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1131 (lip1
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1132 blocksize
- 1) & ~UDF_EXTENT_LENGTH_MASK
) {
1133 lip1
->extLength
= (lip1
->extLength
-
1135 UDF_EXTENT_LENGTH_MASK
) +
1136 UDF_EXTENT_LENGTH_MASK
) &
1138 li
->extLength
= (li
->extLength
&
1139 UDF_EXTENT_FLAG_MASK
) +
1140 (UDF_EXTENT_LENGTH_MASK
+ 1) -
1143 li
->extLength
= lip1
->extLength
+
1145 UDF_EXTENT_LENGTH_MASK
) +
1146 blocksize
- 1) & ~(blocksize
- 1));
1147 if (*endnum
> (i
+ 2))
1148 memmove(&laarr
[i
+ 1], &laarr
[i
+ 2],
1149 sizeof(struct long_ad
) *
1150 (*endnum
- (i
+ 2)));
1154 } else if ((li
->extLength
>> 30) ==
1155 (EXT_NOT_RECORDED_ALLOCATED
>> 30)) {
1156 udf_free_blocks(inode
->i_sb
, inode
,
1157 &li
->extLocation
, 0,
1159 UDF_EXTENT_LENGTH_MASK
) +
1160 blocksize
- 1) >> blocksize_bits
);
1161 li
->extLocation
.logicalBlockNum
= 0;
1162 li
->extLocation
.partitionReferenceNum
= 0;
1163 li
->extLength
= (li
->extLength
&
1164 UDF_EXTENT_LENGTH_MASK
) |
1165 EXT_NOT_RECORDED_NOT_ALLOCATED
;
1170 static void udf_update_extents(struct inode
*inode
, struct kernel_long_ad
*laarr
,
1171 int startnum
, int endnum
,
1172 struct extent_position
*epos
)
1175 struct kernel_lb_addr tmploc
;
1178 if (startnum
> endnum
) {
1179 for (i
= 0; i
< (startnum
- endnum
); i
++)
1180 udf_delete_aext(inode
, *epos
, laarr
[i
].extLocation
,
1181 laarr
[i
].extLength
);
1182 } else if (startnum
< endnum
) {
1183 for (i
= 0; i
< (endnum
- startnum
); i
++) {
1184 udf_insert_aext(inode
, *epos
, laarr
[i
].extLocation
,
1185 laarr
[i
].extLength
);
1186 udf_next_aext(inode
, epos
, &laarr
[i
].extLocation
,
1187 &laarr
[i
].extLength
, 1);
1192 for (i
= start
; i
< endnum
; i
++) {
1193 udf_next_aext(inode
, epos
, &tmploc
, &tmplen
, 0);
1194 udf_write_aext(inode
, epos
, &laarr
[i
].extLocation
,
1195 laarr
[i
].extLength
, 1);
1199 struct buffer_head
*udf_bread(struct inode
*inode
, int block
,
1200 int create
, int *err
)
1202 struct buffer_head
*bh
= NULL
;
1204 bh
= udf_getblk(inode
, block
, create
, err
);
1208 if (buffer_uptodate(bh
))
1211 ll_rw_block(REQ_OP_READ
, 0, 1, &bh
);
1214 if (buffer_uptodate(bh
))
1222 int udf_setsize(struct inode
*inode
, loff_t newsize
)
1225 struct udf_inode_info
*iinfo
;
1226 int bsize
= i_blocksize(inode
);
1228 if (!(S_ISREG(inode
->i_mode
) || S_ISDIR(inode
->i_mode
) ||
1229 S_ISLNK(inode
->i_mode
)))
1231 if (IS_APPEND(inode
) || IS_IMMUTABLE(inode
))
1234 iinfo
= UDF_I(inode
);
1235 if (newsize
> inode
->i_size
) {
1236 down_write(&iinfo
->i_data_sem
);
1237 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
) {
1239 (udf_file_entry_alloc_offset(inode
) + newsize
)) {
1240 err
= udf_expand_file_adinicb(inode
);
1243 down_write(&iinfo
->i_data_sem
);
1245 iinfo
->i_lenAlloc
= newsize
;
1249 err
= udf_extend_file(inode
, newsize
);
1251 up_write(&iinfo
->i_data_sem
);
1255 up_write(&iinfo
->i_data_sem
);
1256 truncate_setsize(inode
, newsize
);
1258 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
) {
1259 down_write(&iinfo
->i_data_sem
);
1260 udf_clear_extent_cache(inode
);
1261 memset(iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
+ newsize
,
1262 0x00, bsize
- newsize
-
1263 udf_file_entry_alloc_offset(inode
));
1264 iinfo
->i_lenAlloc
= newsize
;
1265 truncate_setsize(inode
, newsize
);
1266 up_write(&iinfo
->i_data_sem
);
1269 err
= block_truncate_page(inode
->i_mapping
, newsize
,
1273 truncate_setsize(inode
, newsize
);
1274 down_write(&iinfo
->i_data_sem
);
1275 udf_clear_extent_cache(inode
);
1276 udf_truncate_extents(inode
);
1277 up_write(&iinfo
->i_data_sem
);
1280 inode
->i_mtime
= inode
->i_ctime
= current_time(inode
);
1282 udf_sync_inode(inode
);
1284 mark_inode_dirty(inode
);
1289 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
1290 * arbitrary - just that we hopefully don't limit any real use of rewritten
1291 * inode on write-once media but avoid looping for too long on corrupted media.
1293 #define UDF_MAX_ICB_NESTING 1024
1295 static int udf_read_inode(struct inode
*inode
, bool hidden_inode
)
1297 struct buffer_head
*bh
= NULL
;
1298 struct fileEntry
*fe
;
1299 struct extendedFileEntry
*efe
;
1301 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1302 struct udf_sb_info
*sbi
= UDF_SB(inode
->i_sb
);
1303 struct kernel_lb_addr
*iloc
= &iinfo
->i_location
;
1304 unsigned int link_count
;
1305 unsigned int indirections
= 0;
1306 int bs
= inode
->i_sb
->s_blocksize
;
1310 if (iloc
->partitionReferenceNum
>= sbi
->s_partitions
) {
1311 udf_debug("partition reference: %d > logical volume partitions: %d\n",
1312 iloc
->partitionReferenceNum
, sbi
->s_partitions
);
1316 if (iloc
->logicalBlockNum
>=
1317 sbi
->s_partmaps
[iloc
->partitionReferenceNum
].s_partition_len
) {
1318 udf_debug("block=%d, partition=%d out of range\n",
1319 iloc
->logicalBlockNum
, iloc
->partitionReferenceNum
);
1324 * Set defaults, but the inode is still incomplete!
1325 * Note: get_new_inode() sets the following on a new inode:
1328 * i_flags = sb->s_flags
1330 * clean_inode(): zero fills and sets
1335 bh
= udf_read_ptagged(inode
->i_sb
, iloc
, 0, &ident
);
1337 udf_err(inode
->i_sb
, "(ino %ld) failed !bh\n", inode
->i_ino
);
1341 if (ident
!= TAG_IDENT_FE
&& ident
!= TAG_IDENT_EFE
&&
1342 ident
!= TAG_IDENT_USE
) {
1343 udf_err(inode
->i_sb
, "(ino %ld) failed ident=%d\n",
1344 inode
->i_ino
, ident
);
1348 fe
= (struct fileEntry
*)bh
->b_data
;
1349 efe
= (struct extendedFileEntry
*)bh
->b_data
;
1351 if (fe
->icbTag
.strategyType
== cpu_to_le16(4096)) {
1352 struct buffer_head
*ibh
;
1354 ibh
= udf_read_ptagged(inode
->i_sb
, iloc
, 1, &ident
);
1355 if (ident
== TAG_IDENT_IE
&& ibh
) {
1356 struct kernel_lb_addr loc
;
1357 struct indirectEntry
*ie
;
1359 ie
= (struct indirectEntry
*)ibh
->b_data
;
1360 loc
= lelb_to_cpu(ie
->indirectICB
.extLocation
);
1362 if (ie
->indirectICB
.extLength
) {
1364 memcpy(&iinfo
->i_location
, &loc
,
1365 sizeof(struct kernel_lb_addr
));
1366 if (++indirections
> UDF_MAX_ICB_NESTING
) {
1367 udf_err(inode
->i_sb
,
1368 "too many ICBs in ICB hierarchy"
1369 " (max %d supported)\n",
1370 UDF_MAX_ICB_NESTING
);
1378 } else if (fe
->icbTag
.strategyType
!= cpu_to_le16(4)) {
1379 udf_err(inode
->i_sb
, "unsupported strategy type: %d\n",
1380 le16_to_cpu(fe
->icbTag
.strategyType
));
1383 if (fe
->icbTag
.strategyType
== cpu_to_le16(4))
1384 iinfo
->i_strat4096
= 0;
1385 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1386 iinfo
->i_strat4096
= 1;
1388 iinfo
->i_alloc_type
= le16_to_cpu(fe
->icbTag
.flags
) &
1389 ICBTAG_FLAG_AD_MASK
;
1390 if (iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_SHORT
&&
1391 iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_LONG
&&
1392 iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_IN_ICB
) {
1396 iinfo
->i_unique
= 0;
1397 iinfo
->i_lenEAttr
= 0;
1398 iinfo
->i_lenExtents
= 0;
1399 iinfo
->i_lenAlloc
= 0;
1400 iinfo
->i_next_alloc_block
= 0;
1401 iinfo
->i_next_alloc_goal
= 0;
1402 if (fe
->descTag
.tagIdent
== cpu_to_le16(TAG_IDENT_EFE
)) {
1405 ret
= udf_alloc_i_data(inode
, bs
-
1406 sizeof(struct extendedFileEntry
));
1409 memcpy(iinfo
->i_ext
.i_data
,
1410 bh
->b_data
+ sizeof(struct extendedFileEntry
),
1411 bs
- sizeof(struct extendedFileEntry
));
1412 } else if (fe
->descTag
.tagIdent
== cpu_to_le16(TAG_IDENT_FE
)) {
1415 ret
= udf_alloc_i_data(inode
, bs
- sizeof(struct fileEntry
));
1418 memcpy(iinfo
->i_ext
.i_data
,
1419 bh
->b_data
+ sizeof(struct fileEntry
),
1420 bs
- sizeof(struct fileEntry
));
1421 } else if (fe
->descTag
.tagIdent
== cpu_to_le16(TAG_IDENT_USE
)) {
1424 iinfo
->i_lenAlloc
= le32_to_cpu(
1425 ((struct unallocSpaceEntry
*)bh
->b_data
)->
1427 ret
= udf_alloc_i_data(inode
, bs
-
1428 sizeof(struct unallocSpaceEntry
));
1431 memcpy(iinfo
->i_ext
.i_data
,
1432 bh
->b_data
+ sizeof(struct unallocSpaceEntry
),
1433 bs
- sizeof(struct unallocSpaceEntry
));
1438 read_lock(&sbi
->s_cred_lock
);
1439 i_uid_write(inode
, le32_to_cpu(fe
->uid
));
1440 if (!uid_valid(inode
->i_uid
) ||
1441 UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_UID_IGNORE
) ||
1442 UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_UID_SET
))
1443 inode
->i_uid
= UDF_SB(inode
->i_sb
)->s_uid
;
1445 i_gid_write(inode
, le32_to_cpu(fe
->gid
));
1446 if (!gid_valid(inode
->i_gid
) ||
1447 UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_GID_IGNORE
) ||
1448 UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_GID_SET
))
1449 inode
->i_gid
= UDF_SB(inode
->i_sb
)->s_gid
;
1451 if (fe
->icbTag
.fileType
!= ICBTAG_FILE_TYPE_DIRECTORY
&&
1452 sbi
->s_fmode
!= UDF_INVALID_MODE
)
1453 inode
->i_mode
= sbi
->s_fmode
;
1454 else if (fe
->icbTag
.fileType
== ICBTAG_FILE_TYPE_DIRECTORY
&&
1455 sbi
->s_dmode
!= UDF_INVALID_MODE
)
1456 inode
->i_mode
= sbi
->s_dmode
;
1458 inode
->i_mode
= udf_convert_permissions(fe
);
1459 inode
->i_mode
&= ~sbi
->s_umask
;
1460 read_unlock(&sbi
->s_cred_lock
);
1462 link_count
= le16_to_cpu(fe
->fileLinkCount
);
1464 if (!hidden_inode
) {
1470 set_nlink(inode
, link_count
);
1472 inode
->i_size
= le64_to_cpu(fe
->informationLength
);
1473 iinfo
->i_lenExtents
= inode
->i_size
;
1475 if (iinfo
->i_efe
== 0) {
1476 inode
->i_blocks
= le64_to_cpu(fe
->logicalBlocksRecorded
) <<
1477 (inode
->i_sb
->s_blocksize_bits
- 9);
1479 if (!udf_disk_stamp_to_time(&inode
->i_atime
, fe
->accessTime
))
1480 inode
->i_atime
= sbi
->s_record_time
;
1482 if (!udf_disk_stamp_to_time(&inode
->i_mtime
,
1483 fe
->modificationTime
))
1484 inode
->i_mtime
= sbi
->s_record_time
;
1486 if (!udf_disk_stamp_to_time(&inode
->i_ctime
, fe
->attrTime
))
1487 inode
->i_ctime
= sbi
->s_record_time
;
1489 iinfo
->i_unique
= le64_to_cpu(fe
->uniqueID
);
1490 iinfo
->i_lenEAttr
= le32_to_cpu(fe
->lengthExtendedAttr
);
1491 iinfo
->i_lenAlloc
= le32_to_cpu(fe
->lengthAllocDescs
);
1492 iinfo
->i_checkpoint
= le32_to_cpu(fe
->checkpoint
);
1494 inode
->i_blocks
= le64_to_cpu(efe
->logicalBlocksRecorded
) <<
1495 (inode
->i_sb
->s_blocksize_bits
- 9);
1497 if (!udf_disk_stamp_to_time(&inode
->i_atime
, efe
->accessTime
))
1498 inode
->i_atime
= sbi
->s_record_time
;
1500 if (!udf_disk_stamp_to_time(&inode
->i_mtime
,
1501 efe
->modificationTime
))
1502 inode
->i_mtime
= sbi
->s_record_time
;
1504 if (!udf_disk_stamp_to_time(&iinfo
->i_crtime
, efe
->createTime
))
1505 iinfo
->i_crtime
= sbi
->s_record_time
;
1507 if (!udf_disk_stamp_to_time(&inode
->i_ctime
, efe
->attrTime
))
1508 inode
->i_ctime
= sbi
->s_record_time
;
1510 iinfo
->i_unique
= le64_to_cpu(efe
->uniqueID
);
1511 iinfo
->i_lenEAttr
= le32_to_cpu(efe
->lengthExtendedAttr
);
1512 iinfo
->i_lenAlloc
= le32_to_cpu(efe
->lengthAllocDescs
);
1513 iinfo
->i_checkpoint
= le32_to_cpu(efe
->checkpoint
);
1515 inode
->i_generation
= iinfo
->i_unique
;
1518 * Sanity check length of allocation descriptors and extended attrs to
1519 * avoid integer overflows
1521 if (iinfo
->i_lenEAttr
> bs
|| iinfo
->i_lenAlloc
> bs
)
1523 /* Now do exact checks */
1524 if (udf_file_entry_alloc_offset(inode
) + iinfo
->i_lenAlloc
> bs
)
1526 /* Sanity checks for files in ICB so that we don't get confused later */
1527 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
) {
1529 * For file in ICB data is stored in allocation descriptor
1530 * so sizes should match
1532 if (iinfo
->i_lenAlloc
!= inode
->i_size
)
1534 /* File in ICB has to fit in there... */
1535 if (inode
->i_size
> bs
- udf_file_entry_alloc_offset(inode
))
1539 switch (fe
->icbTag
.fileType
) {
1540 case ICBTAG_FILE_TYPE_DIRECTORY
:
1541 inode
->i_op
= &udf_dir_inode_operations
;
1542 inode
->i_fop
= &udf_dir_operations
;
1543 inode
->i_mode
|= S_IFDIR
;
1546 case ICBTAG_FILE_TYPE_REALTIME
:
1547 case ICBTAG_FILE_TYPE_REGULAR
:
1548 case ICBTAG_FILE_TYPE_UNDEF
:
1549 case ICBTAG_FILE_TYPE_VAT20
:
1550 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
)
1551 inode
->i_data
.a_ops
= &udf_adinicb_aops
;
1553 inode
->i_data
.a_ops
= &udf_aops
;
1554 inode
->i_op
= &udf_file_inode_operations
;
1555 inode
->i_fop
= &udf_file_operations
;
1556 inode
->i_mode
|= S_IFREG
;
1558 case ICBTAG_FILE_TYPE_BLOCK
:
1559 inode
->i_mode
|= S_IFBLK
;
1561 case ICBTAG_FILE_TYPE_CHAR
:
1562 inode
->i_mode
|= S_IFCHR
;
1564 case ICBTAG_FILE_TYPE_FIFO
:
1565 init_special_inode(inode
, inode
->i_mode
| S_IFIFO
, 0);
1567 case ICBTAG_FILE_TYPE_SOCKET
:
1568 init_special_inode(inode
, inode
->i_mode
| S_IFSOCK
, 0);
1570 case ICBTAG_FILE_TYPE_SYMLINK
:
1571 inode
->i_data
.a_ops
= &udf_symlink_aops
;
1572 inode
->i_op
= &udf_symlink_inode_operations
;
1573 inode_nohighmem(inode
);
1574 inode
->i_mode
= S_IFLNK
| 0777;
1576 case ICBTAG_FILE_TYPE_MAIN
:
1577 udf_debug("METADATA FILE-----\n");
1579 case ICBTAG_FILE_TYPE_MIRROR
:
1580 udf_debug("METADATA MIRROR FILE-----\n");
1582 case ICBTAG_FILE_TYPE_BITMAP
:
1583 udf_debug("METADATA BITMAP FILE-----\n");
1586 udf_err(inode
->i_sb
, "(ino %ld) failed unknown file type=%d\n",
1587 inode
->i_ino
, fe
->icbTag
.fileType
);
1590 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
)) {
1591 struct deviceSpec
*dsea
=
1592 (struct deviceSpec
*)udf_get_extendedattr(inode
, 12, 1);
1594 init_special_inode(inode
, inode
->i_mode
,
1595 MKDEV(le32_to_cpu(dsea
->majorDeviceIdent
),
1596 le32_to_cpu(dsea
->minorDeviceIdent
)));
1597 /* Developer ID ??? */
1607 static int udf_alloc_i_data(struct inode
*inode
, size_t size
)
1609 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1610 iinfo
->i_ext
.i_data
= kmalloc(size
, GFP_KERNEL
);
1611 if (!iinfo
->i_ext
.i_data
)
1616 static umode_t
udf_convert_permissions(struct fileEntry
*fe
)
1619 uint32_t permissions
;
1622 permissions
= le32_to_cpu(fe
->permissions
);
1623 flags
= le16_to_cpu(fe
->icbTag
.flags
);
1625 mode
= ((permissions
) & 0007) |
1626 ((permissions
>> 2) & 0070) |
1627 ((permissions
>> 4) & 0700) |
1628 ((flags
& ICBTAG_FLAG_SETUID
) ? S_ISUID
: 0) |
1629 ((flags
& ICBTAG_FLAG_SETGID
) ? S_ISGID
: 0) |
1630 ((flags
& ICBTAG_FLAG_STICKY
) ? S_ISVTX
: 0);
1635 int udf_write_inode(struct inode
*inode
, struct writeback_control
*wbc
)
1637 return udf_update_inode(inode
, wbc
->sync_mode
== WB_SYNC_ALL
);
/* Synchronously write the inode's on-disk entry. */
static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}
1645 static void udf_adjust_time(struct udf_inode_info
*iinfo
, struct timespec time
)
1647 if (iinfo
->i_crtime
.tv_sec
> time
.tv_sec
||
1648 (iinfo
->i_crtime
.tv_sec
== time
.tv_sec
&&
1649 iinfo
->i_crtime
.tv_nsec
> time
.tv_nsec
))
1650 iinfo
->i_crtime
= time
;
1653 static int udf_update_inode(struct inode
*inode
, int do_sync
)
1655 struct buffer_head
*bh
= NULL
;
1656 struct fileEntry
*fe
;
1657 struct extendedFileEntry
*efe
;
1658 uint64_t lb_recorded
;
1663 struct udf_sb_info
*sbi
= UDF_SB(inode
->i_sb
);
1664 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
1665 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1667 bh
= udf_tgetblk(inode
->i_sb
,
1668 udf_get_lb_pblock(inode
->i_sb
, &iinfo
->i_location
, 0));
1670 udf_debug("getblk failure\n");
1675 memset(bh
->b_data
, 0, inode
->i_sb
->s_blocksize
);
1676 fe
= (struct fileEntry
*)bh
->b_data
;
1677 efe
= (struct extendedFileEntry
*)bh
->b_data
;
1680 struct unallocSpaceEntry
*use
=
1681 (struct unallocSpaceEntry
*)bh
->b_data
;
1683 use
->lengthAllocDescs
= cpu_to_le32(iinfo
->i_lenAlloc
);
1684 memcpy(bh
->b_data
+ sizeof(struct unallocSpaceEntry
),
1685 iinfo
->i_ext
.i_data
, inode
->i_sb
->s_blocksize
-
1686 sizeof(struct unallocSpaceEntry
));
1687 use
->descTag
.tagIdent
= cpu_to_le16(TAG_IDENT_USE
);
1688 crclen
= sizeof(struct unallocSpaceEntry
);
1693 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_UID_FORGET
))
1694 fe
->uid
= cpu_to_le32(-1);
1696 fe
->uid
= cpu_to_le32(i_uid_read(inode
));
1698 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_GID_FORGET
))
1699 fe
->gid
= cpu_to_le32(-1);
1701 fe
->gid
= cpu_to_le32(i_gid_read(inode
));
1703 udfperms
= ((inode
->i_mode
& 0007)) |
1704 ((inode
->i_mode
& 0070) << 2) |
1705 ((inode
->i_mode
& 0700) << 4);
1707 udfperms
|= (le32_to_cpu(fe
->permissions
) &
1708 (FE_PERM_O_DELETE
| FE_PERM_O_CHATTR
|
1709 FE_PERM_G_DELETE
| FE_PERM_G_CHATTR
|
1710 FE_PERM_U_DELETE
| FE_PERM_U_CHATTR
));
1711 fe
->permissions
= cpu_to_le32(udfperms
);
1713 if (S_ISDIR(inode
->i_mode
) && inode
->i_nlink
> 0)
1714 fe
->fileLinkCount
= cpu_to_le16(inode
->i_nlink
- 1);
1716 fe
->fileLinkCount
= cpu_to_le16(inode
->i_nlink
);
1718 fe
->informationLength
= cpu_to_le64(inode
->i_size
);
1720 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
)) {
1722 struct deviceSpec
*dsea
=
1723 (struct deviceSpec
*)udf_get_extendedattr(inode
, 12, 1);
1725 dsea
= (struct deviceSpec
*)
1726 udf_add_extendedattr(inode
,
1727 sizeof(struct deviceSpec
) +
1728 sizeof(struct regid
), 12, 0x3);
1729 dsea
->attrType
= cpu_to_le32(12);
1730 dsea
->attrSubtype
= 1;
1731 dsea
->attrLength
= cpu_to_le32(
1732 sizeof(struct deviceSpec
) +
1733 sizeof(struct regid
));
1734 dsea
->impUseLength
= cpu_to_le32(sizeof(struct regid
));
1736 eid
= (struct regid
*)dsea
->impUse
;
1737 memset(eid
, 0, sizeof(*eid
));
1738 strcpy(eid
->ident
, UDF_ID_DEVELOPER
);
1739 eid
->identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1740 eid
->identSuffix
[1] = UDF_OS_ID_LINUX
;
1741 dsea
->majorDeviceIdent
= cpu_to_le32(imajor(inode
));
1742 dsea
->minorDeviceIdent
= cpu_to_le32(iminor(inode
));
1745 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
)
1746 lb_recorded
= 0; /* No extents => no blocks! */
1749 (inode
->i_blocks
+ (1 << (blocksize_bits
- 9)) - 1) >>
1750 (blocksize_bits
- 9);
1752 if (iinfo
->i_efe
== 0) {
1753 memcpy(bh
->b_data
+ sizeof(struct fileEntry
),
1754 iinfo
->i_ext
.i_data
,
1755 inode
->i_sb
->s_blocksize
- sizeof(struct fileEntry
));
1756 fe
->logicalBlocksRecorded
= cpu_to_le64(lb_recorded
);
1758 udf_time_to_disk_stamp(&fe
->accessTime
, inode
->i_atime
);
1759 udf_time_to_disk_stamp(&fe
->modificationTime
, inode
->i_mtime
);
1760 udf_time_to_disk_stamp(&fe
->attrTime
, inode
->i_ctime
);
1761 memset(&(fe
->impIdent
), 0, sizeof(struct regid
));
1762 strcpy(fe
->impIdent
.ident
, UDF_ID_DEVELOPER
);
1763 fe
->impIdent
.identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1764 fe
->impIdent
.identSuffix
[1] = UDF_OS_ID_LINUX
;
1765 fe
->uniqueID
= cpu_to_le64(iinfo
->i_unique
);
1766 fe
->lengthExtendedAttr
= cpu_to_le32(iinfo
->i_lenEAttr
);
1767 fe
->lengthAllocDescs
= cpu_to_le32(iinfo
->i_lenAlloc
);
1768 fe
->checkpoint
= cpu_to_le32(iinfo
->i_checkpoint
);
1769 fe
->descTag
.tagIdent
= cpu_to_le16(TAG_IDENT_FE
);
1770 crclen
= sizeof(struct fileEntry
);
1772 memcpy(bh
->b_data
+ sizeof(struct extendedFileEntry
),
1773 iinfo
->i_ext
.i_data
,
1774 inode
->i_sb
->s_blocksize
-
1775 sizeof(struct extendedFileEntry
));
1776 efe
->objectSize
= cpu_to_le64(inode
->i_size
);
1777 efe
->logicalBlocksRecorded
= cpu_to_le64(lb_recorded
);
1779 udf_adjust_time(iinfo
, inode
->i_atime
);
1780 udf_adjust_time(iinfo
, inode
->i_mtime
);
1781 udf_adjust_time(iinfo
, inode
->i_ctime
);
1783 udf_time_to_disk_stamp(&efe
->accessTime
, inode
->i_atime
);
1784 udf_time_to_disk_stamp(&efe
->modificationTime
, inode
->i_mtime
);
1785 udf_time_to_disk_stamp(&efe
->createTime
, iinfo
->i_crtime
);
1786 udf_time_to_disk_stamp(&efe
->attrTime
, inode
->i_ctime
);
1788 memset(&(efe
->impIdent
), 0, sizeof(efe
->impIdent
));
1789 strcpy(efe
->impIdent
.ident
, UDF_ID_DEVELOPER
);
1790 efe
->impIdent
.identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1791 efe
->impIdent
.identSuffix
[1] = UDF_OS_ID_LINUX
;
1792 efe
->uniqueID
= cpu_to_le64(iinfo
->i_unique
);
1793 efe
->lengthExtendedAttr
= cpu_to_le32(iinfo
->i_lenEAttr
);
1794 efe
->lengthAllocDescs
= cpu_to_le32(iinfo
->i_lenAlloc
);
1795 efe
->checkpoint
= cpu_to_le32(iinfo
->i_checkpoint
);
1796 efe
->descTag
.tagIdent
= cpu_to_le16(TAG_IDENT_EFE
);
1797 crclen
= sizeof(struct extendedFileEntry
);
1801 if (iinfo
->i_strat4096
) {
1802 fe
->icbTag
.strategyType
= cpu_to_le16(4096);
1803 fe
->icbTag
.strategyParameter
= cpu_to_le16(1);
1804 fe
->icbTag
.numEntries
= cpu_to_le16(2);
1806 fe
->icbTag
.strategyType
= cpu_to_le16(4);
1807 fe
->icbTag
.numEntries
= cpu_to_le16(1);
1811 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_USE
;
1812 else if (S_ISDIR(inode
->i_mode
))
1813 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_DIRECTORY
;
1814 else if (S_ISREG(inode
->i_mode
))
1815 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_REGULAR
;
1816 else if (S_ISLNK(inode
->i_mode
))
1817 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_SYMLINK
;
1818 else if (S_ISBLK(inode
->i_mode
))
1819 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_BLOCK
;
1820 else if (S_ISCHR(inode
->i_mode
))
1821 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_CHAR
;
1822 else if (S_ISFIFO(inode
->i_mode
))
1823 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_FIFO
;
1824 else if (S_ISSOCK(inode
->i_mode
))
1825 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_SOCKET
;
1827 icbflags
= iinfo
->i_alloc_type
|
1828 ((inode
->i_mode
& S_ISUID
) ? ICBTAG_FLAG_SETUID
: 0) |
1829 ((inode
->i_mode
& S_ISGID
) ? ICBTAG_FLAG_SETGID
: 0) |
1830 ((inode
->i_mode
& S_ISVTX
) ? ICBTAG_FLAG_STICKY
: 0) |
1831 (le16_to_cpu(fe
->icbTag
.flags
) &
1832 ~(ICBTAG_FLAG_AD_MASK
| ICBTAG_FLAG_SETUID
|
1833 ICBTAG_FLAG_SETGID
| ICBTAG_FLAG_STICKY
));
1835 fe
->icbTag
.flags
= cpu_to_le16(icbflags
);
1836 if (sbi
->s_udfrev
>= 0x0200)
1837 fe
->descTag
.descVersion
= cpu_to_le16(3);
1839 fe
->descTag
.descVersion
= cpu_to_le16(2);
1840 fe
->descTag
.tagSerialNum
= cpu_to_le16(sbi
->s_serial_number
);
1841 fe
->descTag
.tagLocation
= cpu_to_le32(
1842 iinfo
->i_location
.logicalBlockNum
);
1843 crclen
+= iinfo
->i_lenEAttr
+ iinfo
->i_lenAlloc
- sizeof(struct tag
);
1844 fe
->descTag
.descCRCLength
= cpu_to_le16(crclen
);
1845 fe
->descTag
.descCRC
= cpu_to_le16(crc_itu_t(0, (char *)fe
+ sizeof(struct tag
),
1847 fe
->descTag
.tagChecksum
= udf_tag_checksum(&fe
->descTag
);
1849 set_buffer_uptodate(bh
);
1852 /* write the data blocks */
1853 mark_buffer_dirty(bh
);
1855 sync_dirty_buffer(bh
);
1856 if (buffer_write_io_error(bh
)) {
1857 udf_warn(inode
->i_sb
, "IO error syncing udf inode [%08lx]\n",
1867 struct inode
*__udf_iget(struct super_block
*sb
, struct kernel_lb_addr
*ino
,
1870 unsigned long block
= udf_get_lb_pblock(sb
, ino
, 0);
1871 struct inode
*inode
= iget_locked(sb
, block
);
1875 return ERR_PTR(-ENOMEM
);
1877 if (!(inode
->i_state
& I_NEW
))
1880 memcpy(&UDF_I(inode
)->i_location
, ino
, sizeof(struct kernel_lb_addr
));
1881 err
= udf_read_inode(inode
, hidden_inode
);
1884 return ERR_PTR(err
);
1886 unlock_new_inode(inode
);
1891 int udf_setup_indirect_aext(struct inode
*inode
, int block
,
1892 struct extent_position
*epos
)
1894 struct super_block
*sb
= inode
->i_sb
;
1895 struct buffer_head
*bh
;
1896 struct allocExtDesc
*aed
;
1897 struct extent_position nepos
;
1898 struct kernel_lb_addr neloc
;
1901 if (UDF_I(inode
)->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
1902 adsize
= sizeof(struct short_ad
);
1903 else if (UDF_I(inode
)->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
1904 adsize
= sizeof(struct long_ad
);
1908 neloc
.logicalBlockNum
= block
;
1909 neloc
.partitionReferenceNum
= epos
->block
.partitionReferenceNum
;
1911 bh
= udf_tgetblk(sb
, udf_get_lb_pblock(sb
, &neloc
, 0));
1915 memset(bh
->b_data
, 0x00, sb
->s_blocksize
);
1916 set_buffer_uptodate(bh
);
1918 mark_buffer_dirty_inode(bh
, inode
);
1920 aed
= (struct allocExtDesc
*)(bh
->b_data
);
1921 if (!UDF_QUERY_FLAG(sb
, UDF_FLAG_STRICT
)) {
1922 aed
->previousAllocExtLocation
=
1923 cpu_to_le32(epos
->block
.logicalBlockNum
);
1925 aed
->lengthAllocDescs
= cpu_to_le32(0);
1926 if (UDF_SB(sb
)->s_udfrev
>= 0x0200)
1930 udf_new_tag(bh
->b_data
, TAG_IDENT_AED
, ver
, 1, block
,
1931 sizeof(struct tag
));
1933 nepos
.block
= neloc
;
1934 nepos
.offset
= sizeof(struct allocExtDesc
);
1938 * Do we have to copy current last extent to make space for indirect
1941 if (epos
->offset
+ adsize
> sb
->s_blocksize
) {
1942 struct kernel_lb_addr cp_loc
;
1946 epos
->offset
-= adsize
;
1947 cp_type
= udf_current_aext(inode
, epos
, &cp_loc
, &cp_len
, 0);
1948 cp_len
|= ((uint32_t)cp_type
) << 30;
1950 __udf_add_aext(inode
, &nepos
, &cp_loc
, cp_len
, 1);
1951 udf_write_aext(inode
, epos
, &nepos
.block
,
1952 sb
->s_blocksize
| EXT_NEXT_EXTENT_ALLOCDECS
, 0);
1954 __udf_add_aext(inode
, epos
, &nepos
.block
,
1955 sb
->s_blocksize
| EXT_NEXT_EXTENT_ALLOCDECS
, 0);
1965 * Append extent at the given position - should be the first free one in inode
1966 * / indirect extent. This function assumes there is enough space in the inode
1967 * or indirect extent. Use udf_add_aext() if you didn't check for this before.
1969 int __udf_add_aext(struct inode
*inode
, struct extent_position
*epos
,
1970 struct kernel_lb_addr
*eloc
, uint32_t elen
, int inc
)
1972 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1973 struct allocExtDesc
*aed
;
1976 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
1977 adsize
= sizeof(struct short_ad
);
1978 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
1979 adsize
= sizeof(struct long_ad
);
1984 WARN_ON(iinfo
->i_lenAlloc
!=
1985 epos
->offset
- udf_file_entry_alloc_offset(inode
));
1987 aed
= (struct allocExtDesc
*)epos
->bh
->b_data
;
1988 WARN_ON(le32_to_cpu(aed
->lengthAllocDescs
) !=
1989 epos
->offset
- sizeof(struct allocExtDesc
));
1990 WARN_ON(epos
->offset
+ adsize
> inode
->i_sb
->s_blocksize
);
1993 udf_write_aext(inode
, epos
, eloc
, elen
, inc
);
1996 iinfo
->i_lenAlloc
+= adsize
;
1997 mark_inode_dirty(inode
);
1999 aed
= (struct allocExtDesc
*)epos
->bh
->b_data
;
2000 le32_add_cpu(&aed
->lengthAllocDescs
, adsize
);
2001 if (!UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_STRICT
) ||
2002 UDF_SB(inode
->i_sb
)->s_udfrev
>= 0x0201)
2003 udf_update_tag(epos
->bh
->b_data
,
2004 epos
->offset
+ (inc
? 0 : adsize
));
2006 udf_update_tag(epos
->bh
->b_data
,
2007 sizeof(struct allocExtDesc
));
2008 mark_buffer_dirty_inode(epos
->bh
, inode
);
2015 * Append extent at given position - should be the first free one in inode
2016 * / indirect extent. Takes care of allocating and linking indirect blocks.
2018 int udf_add_aext(struct inode
*inode
, struct extent_position
*epos
,
2019 struct kernel_lb_addr
*eloc
, uint32_t elen
, int inc
)
2022 struct super_block
*sb
= inode
->i_sb
;
2024 if (UDF_I(inode
)->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
2025 adsize
= sizeof(struct short_ad
);
2026 else if (UDF_I(inode
)->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
2027 adsize
= sizeof(struct long_ad
);
2031 if (epos
->offset
+ (2 * adsize
) > sb
->s_blocksize
) {
2035 new_block
= udf_new_block(sb
, NULL
,
2036 epos
->block
.partitionReferenceNum
,
2037 epos
->block
.logicalBlockNum
, &err
);
2041 err
= udf_setup_indirect_aext(inode
, new_block
, epos
);
2046 return __udf_add_aext(inode
, epos
, eloc
, elen
, inc
);
2049 void udf_write_aext(struct inode
*inode
, struct extent_position
*epos
,
2050 struct kernel_lb_addr
*eloc
, uint32_t elen
, int inc
)
2054 struct short_ad
*sad
;
2055 struct long_ad
*lad
;
2056 struct udf_inode_info
*iinfo
= UDF_I(inode
);
2059 ptr
= iinfo
->i_ext
.i_data
+ epos
->offset
-
2060 udf_file_entry_alloc_offset(inode
) +
2063 ptr
= epos
->bh
->b_data
+ epos
->offset
;
2065 switch (iinfo
->i_alloc_type
) {
2066 case ICBTAG_FLAG_AD_SHORT
:
2067 sad
= (struct short_ad
*)ptr
;
2068 sad
->extLength
= cpu_to_le32(elen
);
2069 sad
->extPosition
= cpu_to_le32(eloc
->logicalBlockNum
);
2070 adsize
= sizeof(struct short_ad
);
2072 case ICBTAG_FLAG_AD_LONG
:
2073 lad
= (struct long_ad
*)ptr
;
2074 lad
->extLength
= cpu_to_le32(elen
);
2075 lad
->extLocation
= cpu_to_lelb(*eloc
);
2076 memset(lad
->impUse
, 0x00, sizeof(lad
->impUse
));
2077 adsize
= sizeof(struct long_ad
);
2084 if (!UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_STRICT
) ||
2085 UDF_SB(inode
->i_sb
)->s_udfrev
>= 0x0201) {
2086 struct allocExtDesc
*aed
=
2087 (struct allocExtDesc
*)epos
->bh
->b_data
;
2088 udf_update_tag(epos
->bh
->b_data
,
2089 le32_to_cpu(aed
->lengthAllocDescs
) +
2090 sizeof(struct allocExtDesc
));
2092 mark_buffer_dirty_inode(epos
->bh
, inode
);
2094 mark_inode_dirty(inode
);
2098 epos
->offset
+= adsize
;
2102 * Only 1 indirect extent in a row really makes sense but allow upto 16 in case
2103 * someone does some weird stuff.
2105 #define UDF_MAX_INDIR_EXTS 16
2107 int8_t udf_next_aext(struct inode
*inode
, struct extent_position
*epos
,
2108 struct kernel_lb_addr
*eloc
, uint32_t *elen
, int inc
)
2111 unsigned int indirections
= 0;
2113 while ((etype
= udf_current_aext(inode
, epos
, eloc
, elen
, inc
)) ==
2114 (EXT_NEXT_EXTENT_ALLOCDECS
>> 30)) {
2117 if (++indirections
> UDF_MAX_INDIR_EXTS
) {
2118 udf_err(inode
->i_sb
,
2119 "too many indirect extents in inode %lu\n",
2124 epos
->block
= *eloc
;
2125 epos
->offset
= sizeof(struct allocExtDesc
);
2127 block
= udf_get_lb_pblock(inode
->i_sb
, &epos
->block
, 0);
2128 epos
->bh
= udf_tread(inode
->i_sb
, block
);
2130 udf_debug("reading block %d failed!\n", block
);
2138 int8_t udf_current_aext(struct inode
*inode
, struct extent_position
*epos
,
2139 struct kernel_lb_addr
*eloc
, uint32_t *elen
, int inc
)
2144 struct short_ad
*sad
;
2145 struct long_ad
*lad
;
2146 struct udf_inode_info
*iinfo
= UDF_I(inode
);
2150 epos
->offset
= udf_file_entry_alloc_offset(inode
);
2151 ptr
= iinfo
->i_ext
.i_data
+ epos
->offset
-
2152 udf_file_entry_alloc_offset(inode
) +
2154 alen
= udf_file_entry_alloc_offset(inode
) +
2158 epos
->offset
= sizeof(struct allocExtDesc
);
2159 ptr
= epos
->bh
->b_data
+ epos
->offset
;
2160 alen
= sizeof(struct allocExtDesc
) +
2161 le32_to_cpu(((struct allocExtDesc
*)epos
->bh
->b_data
)->
2165 switch (iinfo
->i_alloc_type
) {
2166 case ICBTAG_FLAG_AD_SHORT
:
2167 sad
= udf_get_fileshortad(ptr
, alen
, &epos
->offset
, inc
);
2170 etype
= le32_to_cpu(sad
->extLength
) >> 30;
2171 eloc
->logicalBlockNum
= le32_to_cpu(sad
->extPosition
);
2172 eloc
->partitionReferenceNum
=
2173 iinfo
->i_location
.partitionReferenceNum
;
2174 *elen
= le32_to_cpu(sad
->extLength
) & UDF_EXTENT_LENGTH_MASK
;
2176 case ICBTAG_FLAG_AD_LONG
:
2177 lad
= udf_get_filelongad(ptr
, alen
, &epos
->offset
, inc
);
2180 etype
= le32_to_cpu(lad
->extLength
) >> 30;
2181 *eloc
= lelb_to_cpu(lad
->extLocation
);
2182 *elen
= le32_to_cpu(lad
->extLength
) & UDF_EXTENT_LENGTH_MASK
;
2185 udf_debug("alloc_type = %d unsupported\n", iinfo
->i_alloc_type
);
2192 static int8_t udf_insert_aext(struct inode
*inode
, struct extent_position epos
,
2193 struct kernel_lb_addr neloc
, uint32_t nelen
)
2195 struct kernel_lb_addr oeloc
;
2202 while ((etype
= udf_next_aext(inode
, &epos
, &oeloc
, &oelen
, 0)) != -1) {
2203 udf_write_aext(inode
, &epos
, &neloc
, nelen
, 1);
2205 nelen
= (etype
<< 30) | oelen
;
2207 udf_add_aext(inode
, &epos
, &neloc
, nelen
, 1);
2210 return (nelen
>> 30);
2213 int8_t udf_delete_aext(struct inode
*inode
, struct extent_position epos
,
2214 struct kernel_lb_addr eloc
, uint32_t elen
)
2216 struct extent_position oepos
;
2219 struct allocExtDesc
*aed
;
2220 struct udf_inode_info
*iinfo
;
2227 iinfo
= UDF_I(inode
);
2228 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
2229 adsize
= sizeof(struct short_ad
);
2230 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
2231 adsize
= sizeof(struct long_ad
);
2236 if (udf_next_aext(inode
, &epos
, &eloc
, &elen
, 1) == -1)
2239 while ((etype
= udf_next_aext(inode
, &epos
, &eloc
, &elen
, 1)) != -1) {
2240 udf_write_aext(inode
, &oepos
, &eloc
, (etype
<< 30) | elen
, 1);
2241 if (oepos
.bh
!= epos
.bh
) {
2242 oepos
.block
= epos
.block
;
2246 oepos
.offset
= epos
.offset
- adsize
;
2249 memset(&eloc
, 0x00, sizeof(struct kernel_lb_addr
));
2252 if (epos
.bh
!= oepos
.bh
) {
2253 udf_free_blocks(inode
->i_sb
, inode
, &epos
.block
, 0, 1);
2254 udf_write_aext(inode
, &oepos
, &eloc
, elen
, 1);
2255 udf_write_aext(inode
, &oepos
, &eloc
, elen
, 1);
2257 iinfo
->i_lenAlloc
-= (adsize
* 2);
2258 mark_inode_dirty(inode
);
2260 aed
= (struct allocExtDesc
*)oepos
.bh
->b_data
;
2261 le32_add_cpu(&aed
->lengthAllocDescs
, -(2 * adsize
));
2262 if (!UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_STRICT
) ||
2263 UDF_SB(inode
->i_sb
)->s_udfrev
>= 0x0201)
2264 udf_update_tag(oepos
.bh
->b_data
,
2265 oepos
.offset
- (2 * adsize
));
2267 udf_update_tag(oepos
.bh
->b_data
,
2268 sizeof(struct allocExtDesc
));
2269 mark_buffer_dirty_inode(oepos
.bh
, inode
);
2272 udf_write_aext(inode
, &oepos
, &eloc
, elen
, 1);
2274 iinfo
->i_lenAlloc
-= adsize
;
2275 mark_inode_dirty(inode
);
2277 aed
= (struct allocExtDesc
*)oepos
.bh
->b_data
;
2278 le32_add_cpu(&aed
->lengthAllocDescs
, -adsize
);
2279 if (!UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_STRICT
) ||
2280 UDF_SB(inode
->i_sb
)->s_udfrev
>= 0x0201)
2281 udf_update_tag(oepos
.bh
->b_data
,
2282 epos
.offset
- adsize
);
2284 udf_update_tag(oepos
.bh
->b_data
,
2285 sizeof(struct allocExtDesc
));
2286 mark_buffer_dirty_inode(oepos
.bh
, inode
);
2293 return (elen
>> 30);
2296 int8_t inode_bmap(struct inode
*inode
, sector_t block
,
2297 struct extent_position
*pos
, struct kernel_lb_addr
*eloc
,
2298 uint32_t *elen
, sector_t
*offset
)
2300 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
2301 loff_t lbcount
= 0, bcount
= (loff_t
) block
<< blocksize_bits
;
2303 struct udf_inode_info
*iinfo
;
2305 iinfo
= UDF_I(inode
);
2306 if (!udf_read_extent_cache(inode
, bcount
, &lbcount
, pos
)) {
2308 pos
->block
= iinfo
->i_location
;
2313 etype
= udf_next_aext(inode
, pos
, eloc
, elen
, 1);
2315 *offset
= (bcount
- lbcount
) >> blocksize_bits
;
2316 iinfo
->i_lenExtents
= lbcount
;
2320 } while (lbcount
<= bcount
);
2321 /* update extent cache */
2322 udf_update_extent_cache(inode
, lbcount
- *elen
, pos
);
2323 *offset
= (bcount
+ *elen
- lbcount
) >> blocksize_bits
;
2328 long udf_block_map(struct inode
*inode
, sector_t block
)
2330 struct kernel_lb_addr eloc
;
2333 struct extent_position epos
= {};
2336 down_read(&UDF_I(inode
)->i_data_sem
);
2338 if (inode_bmap(inode
, block
, &epos
, &eloc
, &elen
, &offset
) ==
2339 (EXT_RECORDED_ALLOCATED
>> 30))
2340 ret
= udf_get_lb_pblock(inode
->i_sb
, &eloc
, offset
);
2344 up_read(&UDF_I(inode
)->i_data_sem
);
2347 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_VARCONV
))
2348 return udf_fixed_to_variable(ret
);