1 /*
2 * inode.c
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
7 * COPYRIGHT
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
17 * HISTORY
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map
23 * and udf_read_inode
24 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
25 * block boundaries (which is not actually allowed)
26 * 12/20/98 added support for strategy 4096
27 * 03/07/99 rewrote udf_block_map (again)
28 * New funcs, inode_bmap, udf_next_aext
29 * 04/19/99 Support for writing device EA's for major/minor #
30 */
32 #include "udfdecl.h"
33 #include <linux/mm.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
39 #include <linux/crc-itu-t.h>
41 #include "udf_i.h"
42 #include "udf_sb.h"
44 MODULE_AUTHOR("Ben Fennema");
45 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
46 MODULE_LICENSE("GPL");
48 #define EXTENT_MERGE_SIZE 5
50 static mode_t udf_convert_permissions(struct fileEntry *);
51 static int udf_update_inode(struct inode *, int);
52 static void udf_fill_inode(struct inode *, struct buffer_head *);
53 static int udf_sync_inode(struct inode *inode);
54 static int udf_alloc_i_data(struct inode *inode, size_t size);
55 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
56 sector_t *, int *);
57 static int8_t udf_insert_aext(struct inode *, struct extent_position,
58 struct kernel_lb_addr, uint32_t);
59 static void udf_split_extents(struct inode *, int *, int, int,
60 struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
61 static void udf_prealloc_extents(struct inode *, int, int,
62 struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
63 static void udf_merge_extents(struct inode *,
64 struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
65 static void udf_update_extents(struct inode *,
66 struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
67 struct extent_position *);
68 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
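/*
 * Editor's annotation (not part of the original source): throughout this
 * file iinfo->i_alloc_type selects how an inode's data is addressed:
 *   ICBTAG_FLAG_AD_IN_ICB - the data bytes are embedded in the ICB itself
 *                           (converted to real extents by the two
 *                           udf_expand_*_adinicb() helpers below),
 *   ICBTAG_FLAG_AD_SHORT  - extents described by struct short_ad,
 *   ICBTAG_FLAG_AD_LONG   - extents described by struct long_ad.
 * udf_add_aext()/udf_write_aext()/udf_current_aext() size and decode their
 * descriptors according to this field.
 */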
71 void udf_evict_inode(struct inode *inode)
73 struct udf_inode_info *iinfo = UDF_I(inode);
74 int want_delete = 0;
76 truncate_inode_pages(&inode->i_data, 0);
78 if (!inode->i_nlink && !is_bad_inode(inode)) {
79 want_delete = 1;
80 inode->i_size = 0;
81 udf_truncate(inode);
82 udf_update_inode(inode, IS_SYNC(inode));
84 invalidate_inode_buffers(inode);
85 end_writeback(inode);
86 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
87 inode->i_size != iinfo->i_lenExtents) {
88 printk(KERN_WARNING "UDF-fs (%s): Inode %lu (mode %o) has "
89 "inode size %llu different from extent length %llu. "
90 "Filesystem need not be standards compliant.\n",
91 inode->i_sb->s_id, inode->i_ino, inode->i_mode,
92 (unsigned long long)inode->i_size,
93 (unsigned long long)iinfo->i_lenExtents);
95 kfree(iinfo->i_ext.i_data);
96 iinfo->i_ext.i_data = NULL;
97 if (want_delete) {
98 udf_free_inode(inode);
102 static int udf_writepage(struct page *page, struct writeback_control *wbc)
104 return block_write_full_page(page, udf_get_block, wbc);
107 static int udf_readpage(struct file *file, struct page *page)
109 return block_read_full_page(page, udf_get_block);
112 static int udf_write_begin(struct file *file, struct address_space *mapping,
113 loff_t pos, unsigned len, unsigned flags,
114 struct page **pagep, void **fsdata)
116 int ret;
118 ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
119 if (unlikely(ret)) {
120 loff_t isize = mapping->host->i_size;
121 if (pos + len > isize)
122 vmtruncate(mapping->host, isize);
125 return ret;
128 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
130 return generic_block_bmap(mapping, block, udf_get_block);
133 const struct address_space_operations udf_aops = {
134 .readpage = udf_readpage,
135 .writepage = udf_writepage,
136 .sync_page = block_sync_page,
137 .write_begin = udf_write_begin,
138 .write_end = generic_write_end,
139 .bmap = udf_bmap,
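/*
 * Editor's annotation (not part of the original source): a sketch of what
 * udf_expand_file_adinicb() below does when in-ICB data must become a
 * regular, extent-backed file: grab page 0 of the mapping, copy the bytes
 * stored behind the extended attributes in iinfo->i_ext.i_data into it,
 * zero the remainder of the page, reset i_lenAlloc, switch i_alloc_type to
 * ICBTAG_FLAG_AD_SHORT or ICBTAG_FLAG_AD_LONG (per UDF_FLAG_USE_SHORT_AD),
 * point i_data.a_ops at udf_aops and write the page out so the data lands
 * in a real extent. udf_truncate() uses it when a size change no longer
 * fits in the ICB.
 */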
142 void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
144 struct page *page;
145 char *kaddr;
146 struct udf_inode_info *iinfo = UDF_I(inode);
147 struct writeback_control udf_wbc = {
148 .sync_mode = WB_SYNC_NONE,
149 .nr_to_write = 1,
152 /* from now on we have normal address_space methods */
153 inode->i_data.a_ops = &udf_aops;
155 if (!iinfo->i_lenAlloc) {
156 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
157 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
158 else
159 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
160 mark_inode_dirty(inode);
161 return;
164 page = grab_cache_page(inode->i_mapping, 0);
165 BUG_ON(!PageLocked(page));
167 if (!PageUptodate(page)) {
168 kaddr = kmap(page);
169 memset(kaddr + iinfo->i_lenAlloc, 0x00,
170 PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
171 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
172 iinfo->i_lenAlloc);
173 flush_dcache_page(page);
174 SetPageUptodate(page);
175 kunmap(page);
177 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
178 iinfo->i_lenAlloc);
179 iinfo->i_lenAlloc = 0;
180 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
181 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
182 else
183 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
185 inode->i_data.a_ops->writepage(page, &udf_wbc);
186 page_cache_release(page);
188 mark_inode_dirty(inode);
191 struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
192 int *err)
194 int newblock;
195 struct buffer_head *dbh = NULL;
196 struct kernel_lb_addr eloc;
197 uint8_t alloctype;
198 struct extent_position epos;
200 struct udf_fileident_bh sfibh, dfibh;
201 loff_t f_pos = udf_ext0_offset(inode);
202 int size = udf_ext0_offset(inode) + inode->i_size;
203 struct fileIdentDesc cfi, *sfi, *dfi;
204 struct udf_inode_info *iinfo = UDF_I(inode);
206 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
207 alloctype = ICBTAG_FLAG_AD_SHORT;
208 else
209 alloctype = ICBTAG_FLAG_AD_LONG;
211 if (!inode->i_size) {
212 iinfo->i_alloc_type = alloctype;
213 mark_inode_dirty(inode);
214 return NULL;
217 /* alloc block, and copy data to it */
218 *block = udf_new_block(inode->i_sb, inode,
219 iinfo->i_location.partitionReferenceNum,
220 iinfo->i_location.logicalBlockNum, err);
221 if (!(*block))
222 return NULL;
223 newblock = udf_get_pblock(inode->i_sb, *block,
224 iinfo->i_location.partitionReferenceNum,
226 if (!newblock)
227 return NULL;
228 dbh = udf_tgetblk(inode->i_sb, newblock);
229 if (!dbh)
230 return NULL;
231 lock_buffer(dbh);
232 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
233 set_buffer_uptodate(dbh);
234 unlock_buffer(dbh);
235 mark_buffer_dirty_inode(dbh, inode);
237 sfibh.soffset = sfibh.eoffset =
238 f_pos & (inode->i_sb->s_blocksize - 1);
239 sfibh.sbh = sfibh.ebh = NULL;
240 dfibh.soffset = dfibh.eoffset = 0;
241 dfibh.sbh = dfibh.ebh = dbh;
242 while (f_pos < size) {
243 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
244 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
245 NULL, NULL, NULL);
246 if (!sfi) {
247 brelse(dbh);
248 return NULL;
250 iinfo->i_alloc_type = alloctype;
251 sfi->descTag.tagLocation = cpu_to_le32(*block);
252 dfibh.soffset = dfibh.eoffset;
253 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
254 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
255 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
256 sfi->fileIdent +
257 le16_to_cpu(sfi->lengthOfImpUse))) {
258 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
259 brelse(dbh);
260 return NULL;
263 mark_buffer_dirty_inode(dbh, inode);
265 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
266 iinfo->i_lenAlloc);
267 iinfo->i_lenAlloc = 0;
268 eloc.logicalBlockNum = *block;
269 eloc.partitionReferenceNum =
270 iinfo->i_location.partitionReferenceNum;
271 iinfo->i_lenExtents = inode->i_size;
272 epos.bh = NULL;
273 epos.block = iinfo->i_location;
274 epos.offset = udf_file_entry_alloc_offset(inode);
275 udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
276 /* UniqueID stuff */
278 brelse(epos.bh);
279 mark_inode_dirty(inode);
280 return dbh;
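/*
 * Editor's annotation (not part of the original source): udf_get_block()
 * below is the get_block callback behind udf_aops and udf_getblk(). With
 * create == 0 it only looks up an existing mapping through udf_block_map();
 * with create == 1 it takes i_data_sem for writing, lets inode_getblk()
 * allocate and record the block, marks freshly allocated blocks with
 * set_buffer_new(), and keeps i_next_alloc_block/i_next_alloc_goal as a
 * hint so sequential writes allocate contiguously.
 */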
283 static int udf_get_block(struct inode *inode, sector_t block,
284 struct buffer_head *bh_result, int create)
286 int err, new;
287 struct buffer_head *bh;
288 sector_t phys = 0;
289 struct udf_inode_info *iinfo;
291 if (!create) {
292 phys = udf_block_map(inode, block);
293 if (phys)
294 map_bh(bh_result, inode->i_sb, phys);
295 return 0;
298 err = -EIO;
299 new = 0;
300 bh = NULL;
301 iinfo = UDF_I(inode);
303 down_write(&iinfo->i_data_sem);
304 if (block == iinfo->i_next_alloc_block + 1) {
305 iinfo->i_next_alloc_block++;
306 iinfo->i_next_alloc_goal++;
309 err = 0;
311 bh = inode_getblk(inode, block, &err, &phys, &new);
312 BUG_ON(bh);
313 if (err)
314 goto abort;
315 BUG_ON(!phys);
317 if (new)
318 set_buffer_new(bh_result);
319 map_bh(bh_result, inode->i_sb, phys);
321 abort:
322 up_write(&iinfo->i_data_sem);
323 return err;
326 static struct buffer_head *udf_getblk(struct inode *inode, long block,
327 int create, int *err)
329 struct buffer_head *bh;
330 struct buffer_head dummy;
332 dummy.b_state = 0;
333 dummy.b_blocknr = -1000;
334 *err = udf_get_block(inode, block, &dummy, create);
335 if (!*err && buffer_mapped(&dummy)) {
336 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
337 if (buffer_new(&dummy)) {
338 lock_buffer(bh);
339 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
340 set_buffer_uptodate(bh);
341 unlock_buffer(bh);
342 mark_buffer_dirty_inode(bh, inode);
344 return bh;
347 return NULL;
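/*
 * Editor's annotation (not part of the original source): extent descriptors
 * pack the extent type into the top two bits of extLength and the length in
 * bytes into the low 30 bits (UDF_EXTENT_LENGTH_MASK), as udf_current_aext()
 * decodes further down:
 *
 *	etype = le32_to_cpu(sad->extLength) >> 30;
 *	elen  = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
 *
 * A single descriptor therefore covers a bit less than 1 GiB, which is why
 * udf_extend_file() below adds hole extents in (1 << 30)-sized chunks.
 */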
350 /* Extend the file by 'blocks' blocks, return the number of extents added */
351 int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
352 struct kernel_long_ad *last_ext, sector_t blocks)
354 sector_t add;
355 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
356 struct super_block *sb = inode->i_sb;
357 struct kernel_lb_addr prealloc_loc = {};
358 int prealloc_len = 0;
359 struct udf_inode_info *iinfo;
361 /* The previous extent is fake and we should not extend by anything
362 * - there's nothing to do... */
363 if (!blocks && fake)
364 return 0;
366 iinfo = UDF_I(inode);
367 /* Round the last extent up to a multiple of block size */
368 if (last_ext->extLength & (sb->s_blocksize - 1)) {
369 last_ext->extLength =
370 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
371 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
372 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
373 iinfo->i_lenExtents =
374 (iinfo->i_lenExtents + sb->s_blocksize - 1) &
375 ~(sb->s_blocksize - 1);
378 /* Is the last extent just preallocated blocks? */
379 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
380 EXT_NOT_RECORDED_ALLOCATED) {
381 /* Save the extent so that we can reattach it to the end */
382 prealloc_loc = last_ext->extLocation;
383 prealloc_len = last_ext->extLength;
384 /* Mark the extent as a hole */
385 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
386 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
387 last_ext->extLocation.logicalBlockNum = 0;
388 last_ext->extLocation.partitionReferenceNum = 0;
391 /* Can we merge with the previous extent? */
392 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
393 EXT_NOT_RECORDED_NOT_ALLOCATED) {
394 add = ((1 << 30) - sb->s_blocksize -
395 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
396 sb->s_blocksize_bits;
397 if (add > blocks)
398 add = blocks;
399 blocks -= add;
400 last_ext->extLength += add << sb->s_blocksize_bits;
403 if (fake) {
404 udf_add_aext(inode, last_pos, &last_ext->extLocation,
405 last_ext->extLength, 1);
406 count++;
407 } else
408 udf_write_aext(inode, last_pos, &last_ext->extLocation,
409 last_ext->extLength, 1);
411 /* Managed to do everything necessary? */
412 if (!blocks)
413 goto out;
415 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
416 last_ext->extLocation.logicalBlockNum = 0;
417 last_ext->extLocation.partitionReferenceNum = 0;
418 add = (1 << (30-sb->s_blocksize_bits)) - 1;
419 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
420 (add << sb->s_blocksize_bits);
422 /* Create enough extents to cover the whole hole */
423 while (blocks > add) {
424 blocks -= add;
425 if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
426 last_ext->extLength, 1) == -1)
427 return -1;
428 count++;
430 if (blocks) {
431 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
432 (blocks << sb->s_blocksize_bits);
433 if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
434 last_ext->extLength, 1) == -1)
435 return -1;
436 count++;
439 out:
440 /* Do we have some preallocated blocks saved? */
441 if (prealloc_len) {
442 if (udf_add_aext(inode, last_pos, &prealloc_loc,
443 prealloc_len, 1) == -1)
444 return -1;
445 last_ext->extLocation = prealloc_loc;
446 last_ext->extLength = prealloc_len;
447 count++;
450 /* last_pos should point to the last written extent... */
451 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
452 last_pos->offset -= sizeof(struct short_ad);
453 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
454 last_pos->offset -= sizeof(struct long_ad);
455 else
456 return -1;
458 return count;
461 static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
462 int *err, sector_t *phys, int *new)
464 static sector_t last_block;
465 struct buffer_head *result = NULL;
466 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
467 struct extent_position prev_epos, cur_epos, next_epos;
468 int count = 0, startnum = 0, endnum = 0;
469 uint32_t elen = 0, tmpelen;
470 struct kernel_lb_addr eloc, tmpeloc;
471 int c = 1;
472 loff_t lbcount = 0, b_off = 0;
473 uint32_t newblocknum, newblock;
474 sector_t offset = 0;
475 int8_t etype;
476 struct udf_inode_info *iinfo = UDF_I(inode);
477 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
478 int lastblock = 0;
480 prev_epos.offset = udf_file_entry_alloc_offset(inode);
481 prev_epos.block = iinfo->i_location;
482 prev_epos.bh = NULL;
483 cur_epos = next_epos = prev_epos;
484 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
486 /* find the extent which contains the block we are looking for.
487 alternate between laarr[0] and laarr[1] for locations of the
488 current extent, and the previous extent */
489 do {
490 if (prev_epos.bh != cur_epos.bh) {
491 brelse(prev_epos.bh);
492 get_bh(cur_epos.bh);
493 prev_epos.bh = cur_epos.bh;
495 if (cur_epos.bh != next_epos.bh) {
496 brelse(cur_epos.bh);
497 get_bh(next_epos.bh);
498 cur_epos.bh = next_epos.bh;
501 lbcount += elen;
503 prev_epos.block = cur_epos.block;
504 cur_epos.block = next_epos.block;
506 prev_epos.offset = cur_epos.offset;
507 cur_epos.offset = next_epos.offset;
509 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
510 if (etype == -1)
511 break;
513 c = !c;
515 laarr[c].extLength = (etype << 30) | elen;
516 laarr[c].extLocation = eloc;
518 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
519 pgoal = eloc.logicalBlockNum +
520 ((elen + inode->i_sb->s_blocksize - 1) >>
521 inode->i_sb->s_blocksize_bits);
523 count++;
524 } while (lbcount + elen <= b_off);
526 b_off -= lbcount;
527 offset = b_off >> inode->i_sb->s_blocksize_bits;
528 /*
529 * Move prev_epos and cur_epos into indirect extent if we are at
530 * the pointer to it
531 */
532 udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
533 udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
535 /* if the extent is allocated and recorded, return the block
536 if the extent is not a multiple of the blocksize, round up */
538 if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
539 if (elen & (inode->i_sb->s_blocksize - 1)) {
540 elen = EXT_RECORDED_ALLOCATED |
541 ((elen + inode->i_sb->s_blocksize - 1) &
542 ~(inode->i_sb->s_blocksize - 1));
543 etype = udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
545 brelse(prev_epos.bh);
546 brelse(cur_epos.bh);
547 brelse(next_epos.bh);
548 newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
549 *phys = newblock;
550 return NULL;
553 last_block = block;
554 /* Are we beyond EOF? */
555 if (etype == -1) {
556 int ret;
558 if (count) {
559 if (c)
560 laarr[0] = laarr[1];
561 startnum = 1;
562 } else {
563 /* Create a fake extent when there's not one */
564 memset(&laarr[0].extLocation, 0x00,
565 sizeof(struct kernel_lb_addr));
566 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
567 /* Will udf_extend_file() create real extent from
568 a fake one? */
569 startnum = (offset > 0);
571 /* Create extents for the hole between EOF and offset */
572 ret = udf_extend_file(inode, &prev_epos, laarr, offset);
573 if (ret == -1) {
574 brelse(prev_epos.bh);
575 brelse(cur_epos.bh);
576 brelse(next_epos.bh);
577 /* We don't really know the error here so we just make
578 * something up */
579 *err = -ENOSPC;
580 return NULL;
582 c = 0;
583 offset = 0;
584 count += ret;
585 /* We are not covered by a preallocated extent? */
586 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
587 EXT_NOT_RECORDED_ALLOCATED) {
588 /* Is there any real extent? - otherwise we overwrite
589 * the fake one... */
590 if (count)
591 c = !c;
592 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
593 inode->i_sb->s_blocksize;
594 memset(&laarr[c].extLocation, 0x00,
595 sizeof(struct kernel_lb_addr));
596 count++;
597 endnum++;
599 endnum = c + 1;
600 lastblock = 1;
601 } else {
602 endnum = startnum = ((count > 2) ? 2 : count);
604 /* if the current extent is in position 0,
605 swap it with the previous */
606 if (!c && count != 1) {
607 laarr[2] = laarr[0];
608 laarr[0] = laarr[1];
609 laarr[1] = laarr[2];
610 c = 1;
613 /* if the current block is located in an extent,
614 read the next extent */
615 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
616 if (etype != -1) {
617 laarr[c + 1].extLength = (etype << 30) | elen;
618 laarr[c + 1].extLocation = eloc;
619 count++;
620 startnum++;
621 endnum++;
622 } else
623 lastblock = 1;
626 /* if the current extent is not recorded but allocated, get the
627 * block in the extent corresponding to the requested block */
628 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
629 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
630 else { /* otherwise, allocate a new block */
631 if (iinfo->i_next_alloc_block == block)
632 goal = iinfo->i_next_alloc_goal;
634 if (!goal) {
635 if (!(goal = pgoal)) /* XXX: what was intended here? */
636 goal = iinfo->i_location.logicalBlockNum + 1;
639 newblocknum = udf_new_block(inode->i_sb, inode,
640 iinfo->i_location.partitionReferenceNum,
641 goal, err);
642 if (!newblocknum) {
643 brelse(prev_epos.bh);
644 *err = -ENOSPC;
645 return NULL;
647 iinfo->i_lenExtents += inode->i_sb->s_blocksize;
650 /* if the extent the requested block is located in contains multiple
651 * blocks, split the extent into at most three extents. blocks prior
652 * to requested block, requested block, and blocks after requested
653 * block */
654 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
656 #ifdef UDF_PREALLOCATE
657 /* We preallocate blocks only for regular files. It also makes sense
658 * for directories but there's a problem when to drop the
659 * preallocation. We might use some delayed work for that but I feel
660 * it's overengineering for a filesystem like UDF. */
661 if (S_ISREG(inode->i_mode))
662 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
663 #endif
665 /* merge any continuous blocks in laarr */
666 udf_merge_extents(inode, laarr, &endnum);
668 /* write back the new extents, inserting new extents if the new number
669 * of extents is greater than the old number, and deleting extents if
670 * the new number of extents is less than the old number */
671 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
673 brelse(prev_epos.bh);
675 newblock = udf_get_pblock(inode->i_sb, newblocknum,
676 iinfo->i_location.partitionReferenceNum, 0);
677 if (!newblock)
678 return NULL;
679 *phys = newblock;
680 *err = 0;
681 *new = 1;
682 iinfo->i_next_alloc_block = block;
683 iinfo->i_next_alloc_goal = newblocknum;
684 inode->i_ctime = current_fs_time(inode->i_sb);
686 if (IS_SYNC(inode))
687 udf_sync_inode(inode);
688 else
689 mark_inode_dirty(inode);
691 return result;
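/*
 * Editor's annotation (not part of the original source): the four helpers
 * below implement the extent pipeline used by inode_getblk(). Up to
 * EXTENT_MERGE_SIZE (5) descriptors are staged in laarr[]:
 *   udf_split_extents()    - carve the extent holding the target block into
 *                            "before" / target / "after" pieces,
 *   udf_prealloc_extents() - (regular files, under UDF_PREALLOCATE) append
 *                            preallocated blocks past the target,
 *   udf_merge_extents()    - coalesce adjacent compatible pieces and free
 *                            blocks of extents that became unrecorded,
 *   udf_update_extents()   - write the staged descriptors back, inserting or
 *                            deleting on-disk entries as the count changed.
 */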
694 static void udf_split_extents(struct inode *inode, int *c, int offset,
695 int newblocknum,
696 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
697 int *endnum)
699 unsigned long blocksize = inode->i_sb->s_blocksize;
700 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
702 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
703 (laarr[*c].extLength >> 30) ==
704 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
705 int curr = *c;
706 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
707 blocksize - 1) >> blocksize_bits;
708 int8_t etype = (laarr[curr].extLength >> 30);
710 if (blen == 1)
712 else if (!offset || blen == offset + 1) {
713 laarr[curr + 2] = laarr[curr + 1];
714 laarr[curr + 1] = laarr[curr];
715 } else {
716 laarr[curr + 3] = laarr[curr + 1];
717 laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
720 if (offset) {
721 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
722 udf_free_blocks(inode->i_sb, inode,
723 &laarr[curr].extLocation,
724 0, offset);
725 laarr[curr].extLength =
726 EXT_NOT_RECORDED_NOT_ALLOCATED |
727 (offset << blocksize_bits);
728 laarr[curr].extLocation.logicalBlockNum = 0;
729 laarr[curr].extLocation.
730 partitionReferenceNum = 0;
731 } else
732 laarr[curr].extLength = (etype << 30) |
733 (offset << blocksize_bits);
734 curr++;
735 (*c)++;
736 (*endnum)++;
739 laarr[curr].extLocation.logicalBlockNum = newblocknum;
740 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
741 laarr[curr].extLocation.partitionReferenceNum =
742 UDF_I(inode)->i_location.partitionReferenceNum;
743 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
744 blocksize;
745 curr++;
747 if (blen != offset + 1) {
748 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
749 laarr[curr].extLocation.logicalBlockNum +=
750 offset + 1;
751 laarr[curr].extLength = (etype << 30) |
752 ((blen - (offset + 1)) << blocksize_bits);
753 curr++;
754 (*endnum)++;
759 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
760 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
761 int *endnum)
763 int start, length = 0, currlength = 0, i;
765 if (*endnum >= (c + 1)) {
766 if (!lastblock)
767 return;
768 else
769 start = c;
770 } else {
771 if ((laarr[c + 1].extLength >> 30) ==
772 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
773 start = c + 1;
774 length = currlength =
775 (((laarr[c + 1].extLength &
776 UDF_EXTENT_LENGTH_MASK) +
777 inode->i_sb->s_blocksize - 1) >>
778 inode->i_sb->s_blocksize_bits);
779 } else
780 start = c;
783 for (i = start + 1; i <= *endnum; i++) {
784 if (i == *endnum) {
785 if (lastblock)
786 length += UDF_DEFAULT_PREALLOC_BLOCKS;
787 } else if ((laarr[i].extLength >> 30) ==
788 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
789 length += (((laarr[i].extLength &
790 UDF_EXTENT_LENGTH_MASK) +
791 inode->i_sb->s_blocksize - 1) >>
792 inode->i_sb->s_blocksize_bits);
793 } else
794 break;
797 if (length) {
798 int next = laarr[start].extLocation.logicalBlockNum +
799 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
800 inode->i_sb->s_blocksize - 1) >>
801 inode->i_sb->s_blocksize_bits);
802 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
803 laarr[start].extLocation.partitionReferenceNum,
804 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
805 length : UDF_DEFAULT_PREALLOC_BLOCKS) -
806 currlength);
807 if (numalloc) {
808 if (start == (c + 1))
809 laarr[start].extLength +=
810 (numalloc <<
811 inode->i_sb->s_blocksize_bits);
812 else {
813 memmove(&laarr[c + 2], &laarr[c + 1],
814 sizeof(struct long_ad) * (*endnum - (c + 1)));
815 (*endnum)++;
816 laarr[c + 1].extLocation.logicalBlockNum = next;
817 laarr[c + 1].extLocation.partitionReferenceNum =
818 laarr[c].extLocation.
819 partitionReferenceNum;
820 laarr[c + 1].extLength =
821 EXT_NOT_RECORDED_ALLOCATED |
822 (numalloc <<
823 inode->i_sb->s_blocksize_bits);
824 start = c + 1;
827 for (i = start + 1; numalloc && i < *endnum; i++) {
828 int elen = ((laarr[i].extLength &
829 UDF_EXTENT_LENGTH_MASK) +
830 inode->i_sb->s_blocksize - 1) >>
831 inode->i_sb->s_blocksize_bits;
833 if (elen > numalloc) {
834 laarr[i].extLength -=
835 (numalloc <<
836 inode->i_sb->s_blocksize_bits);
837 numalloc = 0;
838 } else {
839 numalloc -= elen;
840 if (*endnum > (i + 1))
841 memmove(&laarr[i],
842 &laarr[i + 1],
843 sizeof(struct long_ad) *
844 (*endnum - (i + 1)));
845 i--;
846 (*endnum)--;
849 UDF_I(inode)->i_lenExtents +=
850 numalloc << inode->i_sb->s_blocksize_bits;
855 static void udf_merge_extents(struct inode *inode,
856 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
857 int *endnum)
859 int i;
860 unsigned long blocksize = inode->i_sb->s_blocksize;
861 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
863 for (i = 0; i < (*endnum - 1); i++) {
864 struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
865 struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
867 if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
868 (((li->extLength >> 30) ==
869 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
870 ((lip1->extLocation.logicalBlockNum -
871 li->extLocation.logicalBlockNum) ==
872 (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
873 blocksize - 1) >> blocksize_bits)))) {
875 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
876 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
877 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
878 lip1->extLength = (lip1->extLength -
879 (li->extLength &
880 UDF_EXTENT_LENGTH_MASK) +
881 UDF_EXTENT_LENGTH_MASK) &
882 ~(blocksize - 1);
883 li->extLength = (li->extLength &
884 UDF_EXTENT_FLAG_MASK) +
885 (UDF_EXTENT_LENGTH_MASK + 1) -
886 blocksize;
887 lip1->extLocation.logicalBlockNum =
888 li->extLocation.logicalBlockNum +
889 ((li->extLength &
890 UDF_EXTENT_LENGTH_MASK) >>
891 blocksize_bits);
892 } else {
893 li->extLength = lip1->extLength +
894 (((li->extLength &
895 UDF_EXTENT_LENGTH_MASK) +
896 blocksize - 1) & ~(blocksize - 1));
897 if (*endnum > (i + 2))
898 memmove(&laarr[i + 1], &laarr[i + 2],
899 sizeof(struct long_ad) *
900 (*endnum - (i + 2)));
901 i--;
902 (*endnum)--;
904 } else if (((li->extLength >> 30) ==
905 (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
906 ((lip1->extLength >> 30) ==
907 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
908 udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
909 ((li->extLength &
910 UDF_EXTENT_LENGTH_MASK) +
911 blocksize - 1) >> blocksize_bits);
912 li->extLocation.logicalBlockNum = 0;
913 li->extLocation.partitionReferenceNum = 0;
915 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
916 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
917 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
918 lip1->extLength = (lip1->extLength -
919 (li->extLength &
920 UDF_EXTENT_LENGTH_MASK) +
921 UDF_EXTENT_LENGTH_MASK) &
922 ~(blocksize - 1);
923 li->extLength = (li->extLength &
924 UDF_EXTENT_FLAG_MASK) +
925 (UDF_EXTENT_LENGTH_MASK + 1) -
926 blocksize;
927 } else {
928 li->extLength = lip1->extLength +
929 (((li->extLength &
930 UDF_EXTENT_LENGTH_MASK) +
931 blocksize - 1) & ~(blocksize - 1));
932 if (*endnum > (i + 2))
933 memmove(&laarr[i + 1], &laarr[i + 2],
934 sizeof(struct long_ad) *
935 (*endnum - (i + 2)));
936 i--;
937 (*endnum)--;
939 } else if ((li->extLength >> 30) ==
940 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
941 udf_free_blocks(inode->i_sb, inode,
942 &li->extLocation, 0,
943 ((li->extLength &
944 UDF_EXTENT_LENGTH_MASK) +
945 blocksize - 1) >> blocksize_bits);
946 li->extLocation.logicalBlockNum = 0;
947 li->extLocation.partitionReferenceNum = 0;
948 li->extLength = (li->extLength &
949 UDF_EXTENT_LENGTH_MASK) |
950 EXT_NOT_RECORDED_NOT_ALLOCATED;
955 static void udf_update_extents(struct inode *inode,
956 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
957 int startnum, int endnum,
958 struct extent_position *epos)
960 int start = 0, i;
961 struct kernel_lb_addr tmploc;
962 uint32_t tmplen;
964 if (startnum > endnum) {
965 for (i = 0; i < (startnum - endnum); i++)
966 udf_delete_aext(inode, *epos, laarr[i].extLocation,
967 laarr[i].extLength);
968 } else if (startnum < endnum) {
969 for (i = 0; i < (endnum - startnum); i++) {
970 udf_insert_aext(inode, *epos, laarr[i].extLocation,
971 laarr[i].extLength);
972 udf_next_aext(inode, epos, &laarr[i].extLocation,
973 &laarr[i].extLength, 1);
974 start++;
978 for (i = start; i < endnum; i++) {
979 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
980 udf_write_aext(inode, epos, &laarr[i].extLocation,
981 laarr[i].extLength, 1);
985 struct buffer_head *udf_bread(struct inode *inode, int block,
986 int create, int *err)
988 struct buffer_head *bh = NULL;
990 bh = udf_getblk(inode, block, create, err);
991 if (!bh)
992 return NULL;
994 if (buffer_uptodate(bh))
995 return bh;
997 ll_rw_block(READ, 1, &bh);
999 wait_on_buffer(bh);
1000 if (buffer_uptodate(bh))
1001 return bh;
1003 brelse(bh);
1004 *err = -EIO;
1005 return NULL;
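/*
 * Editor's annotation (not part of the original source): udf_truncate()
 * below has two paths. For in-ICB inodes it either expands the inode to
 * normal extents first (when the new size plus the allocation offset no
 * longer fits in one block) or simply zeroes the tail of the embedded data
 * and sets i_lenAlloc to the new size. For extent-backed inodes it zeroes
 * the partial last block through block_truncate_page() and drops surplus
 * extents with udf_truncate_extents(), all under i_data_sem.
 */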
1008 void udf_truncate(struct inode *inode)
1010 int offset;
1011 int err;
1012 struct udf_inode_info *iinfo;
1014 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1015 S_ISLNK(inode->i_mode)))
1016 return;
1017 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1018 return;
1020 iinfo = UDF_I(inode);
1021 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1022 down_write(&iinfo->i_data_sem);
1023 if (inode->i_sb->s_blocksize <
1024 (udf_file_entry_alloc_offset(inode) +
1025 inode->i_size)) {
1026 udf_expand_file_adinicb(inode, inode->i_size, &err);
1027 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1028 inode->i_size = iinfo->i_lenAlloc;
1029 up_write(&iinfo->i_data_sem);
1030 return;
1031 } else
1032 udf_truncate_extents(inode);
1033 } else {
1034 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1035 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
1036 0x00, inode->i_sb->s_blocksize -
1037 offset - udf_file_entry_alloc_offset(inode));
1038 iinfo->i_lenAlloc = inode->i_size;
1040 up_write(&iinfo->i_data_sem);
1041 } else {
1042 block_truncate_page(inode->i_mapping, inode->i_size,
1043 udf_get_block);
1044 down_write(&iinfo->i_data_sem);
1045 udf_truncate_extents(inode);
1046 up_write(&iinfo->i_data_sem);
1049 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
1050 if (IS_SYNC(inode))
1051 udf_sync_inode(inode);
1052 else
1053 mark_inode_dirty(inode);
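/*
 * Editor's annotation (not part of the original source): __udf_read_inode()
 * below reads the (extended) file entry with udf_read_ptagged(). Strategy
 * type 4 is a plain ICB; strategy type 4096 means the entry may be
 * superseded through an Indirect Entry (TAG_IDENT_IE), in which case
 * i_location is redirected to the indirect ICB and the inode is re-read
 * from there. udf_update_inode() later writes numEntries = 2 and
 * strategyParameter = 1 for such strat4096 inodes.
 */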
1056 static void __udf_read_inode(struct inode *inode)
1058 struct buffer_head *bh = NULL;
1059 struct fileEntry *fe;
1060 uint16_t ident;
1061 struct udf_inode_info *iinfo = UDF_I(inode);
1063 /*
1064 * Set defaults, but the inode is still incomplete!
1065 * Note: get_new_inode() sets the following on a new inode:
1066 * i_sb = sb
1067 * i_no = ino
1068 * i_flags = sb->s_flags
1069 * i_state = 0
1070 * clean_inode(): zero fills and sets
1071 * i_count = 1
1072 * i_nlink = 1
1073 * i_op = NULL;
1074 */
1075 bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
1076 if (!bh) {
1077 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1078 inode->i_ino);
1079 make_bad_inode(inode);
1080 return;
1083 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1084 ident != TAG_IDENT_USE) {
1085 printk(KERN_ERR "udf: udf_read_inode(ino %ld) "
1086 "failed ident=%d\n", inode->i_ino, ident);
1087 brelse(bh);
1088 make_bad_inode(inode);
1089 return;
1092 fe = (struct fileEntry *)bh->b_data;
1094 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1095 struct buffer_head *ibh;
1097 ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
1098 &ident);
1099 if (ident == TAG_IDENT_IE && ibh) {
1100 struct buffer_head *nbh = NULL;
1101 struct kernel_lb_addr loc;
1102 struct indirectEntry *ie;
1104 ie = (struct indirectEntry *)ibh->b_data;
1105 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1107 if (ie->indirectICB.extLength &&
1108 (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
1109 &ident))) {
1110 if (ident == TAG_IDENT_FE ||
1111 ident == TAG_IDENT_EFE) {
1112 memcpy(&iinfo->i_location,
1113 &loc,
1114 sizeof(struct kernel_lb_addr));
1115 brelse(bh);
1116 brelse(ibh);
1117 brelse(nbh);
1118 __udf_read_inode(inode);
1119 return;
1121 brelse(nbh);
1124 brelse(ibh);
1125 } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1126 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1127 le16_to_cpu(fe->icbTag.strategyType));
1128 brelse(bh);
1129 make_bad_inode(inode);
1130 return;
1132 udf_fill_inode(inode, bh);
1134 brelse(bh);
1137 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1139 struct fileEntry *fe;
1140 struct extendedFileEntry *efe;
1141 int offset;
1142 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1143 struct udf_inode_info *iinfo = UDF_I(inode);
1145 fe = (struct fileEntry *)bh->b_data;
1146 efe = (struct extendedFileEntry *)bh->b_data;
1148 if (fe->icbTag.strategyType == cpu_to_le16(4))
1149 iinfo->i_strat4096 = 0;
1150 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1151 iinfo->i_strat4096 = 1;
1153 iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1154 ICBTAG_FLAG_AD_MASK;
1155 iinfo->i_unique = 0;
1156 iinfo->i_lenEAttr = 0;
1157 iinfo->i_lenExtents = 0;
1158 iinfo->i_lenAlloc = 0;
1159 iinfo->i_next_alloc_block = 0;
1160 iinfo->i_next_alloc_goal = 0;
1161 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1162 iinfo->i_efe = 1;
1163 iinfo->i_use = 0;
1164 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1165 sizeof(struct extendedFileEntry))) {
1166 make_bad_inode(inode);
1167 return;
1169 memcpy(iinfo->i_ext.i_data,
1170 bh->b_data + sizeof(struct extendedFileEntry),
1171 inode->i_sb->s_blocksize -
1172 sizeof(struct extendedFileEntry));
1173 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1174 iinfo->i_efe = 0;
1175 iinfo->i_use = 0;
1176 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1177 sizeof(struct fileEntry))) {
1178 make_bad_inode(inode);
1179 return;
1181 memcpy(iinfo->i_ext.i_data,
1182 bh->b_data + sizeof(struct fileEntry),
1183 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1184 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1185 iinfo->i_efe = 0;
1186 iinfo->i_use = 1;
1187 iinfo->i_lenAlloc = le32_to_cpu(
1188 ((struct unallocSpaceEntry *)bh->b_data)->
1189 lengthAllocDescs);
1190 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1191 sizeof(struct unallocSpaceEntry))) {
1192 make_bad_inode(inode);
1193 return;
1195 memcpy(iinfo->i_ext.i_data,
1196 bh->b_data + sizeof(struct unallocSpaceEntry),
1197 inode->i_sb->s_blocksize -
1198 sizeof(struct unallocSpaceEntry));
1199 return;
1202 read_lock(&sbi->s_cred_lock);
1203 inode->i_uid = le32_to_cpu(fe->uid);
1204 if (inode->i_uid == -1 ||
1205 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
1206 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
1207 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1209 inode->i_gid = le32_to_cpu(fe->gid);
1210 if (inode->i_gid == -1 ||
1211 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
1212 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
1213 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1215 if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
1216 sbi->s_fmode != UDF_INVALID_MODE)
1217 inode->i_mode = sbi->s_fmode;
1218 else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
1219 sbi->s_dmode != UDF_INVALID_MODE)
1220 inode->i_mode = sbi->s_dmode;
1221 else
1222 inode->i_mode = udf_convert_permissions(fe);
1223 inode->i_mode &= ~sbi->s_umask;
1224 read_unlock(&sbi->s_cred_lock);
1226 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1227 if (!inode->i_nlink)
1228 inode->i_nlink = 1;
1230 inode->i_size = le64_to_cpu(fe->informationLength);
1231 iinfo->i_lenExtents = inode->i_size;
1233 if (iinfo->i_efe == 0) {
1234 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1235 (inode->i_sb->s_blocksize_bits - 9);
1237 if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
1238 inode->i_atime = sbi->s_record_time;
1240 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1241 fe->modificationTime))
1242 inode->i_mtime = sbi->s_record_time;
1244 if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
1245 inode->i_ctime = sbi->s_record_time;
1247 iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1248 iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1249 iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1250 offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr;
1251 } else {
1252 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1253 (inode->i_sb->s_blocksize_bits - 9);
1255 if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
1256 inode->i_atime = sbi->s_record_time;
1258 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1259 efe->modificationTime))
1260 inode->i_mtime = sbi->s_record_time;
1262 if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
1263 iinfo->i_crtime = sbi->s_record_time;
1265 if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
1266 inode->i_ctime = sbi->s_record_time;
1268 iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1269 iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1270 iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1271 offset = sizeof(struct extendedFileEntry) +
1272 iinfo->i_lenEAttr;
1275 switch (fe->icbTag.fileType) {
1276 case ICBTAG_FILE_TYPE_DIRECTORY:
1277 inode->i_op = &udf_dir_inode_operations;
1278 inode->i_fop = &udf_dir_operations;
1279 inode->i_mode |= S_IFDIR;
1280 inc_nlink(inode);
1281 break;
1282 case ICBTAG_FILE_TYPE_REALTIME:
1283 case ICBTAG_FILE_TYPE_REGULAR:
1284 case ICBTAG_FILE_TYPE_UNDEF:
1285 case ICBTAG_FILE_TYPE_VAT20:
1286 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1287 inode->i_data.a_ops = &udf_adinicb_aops;
1288 else
1289 inode->i_data.a_ops = &udf_aops;
1290 inode->i_op = &udf_file_inode_operations;
1291 inode->i_fop = &udf_file_operations;
1292 inode->i_mode |= S_IFREG;
1293 break;
1294 case ICBTAG_FILE_TYPE_BLOCK:
1295 inode->i_mode |= S_IFBLK;
1296 break;
1297 case ICBTAG_FILE_TYPE_CHAR:
1298 inode->i_mode |= S_IFCHR;
1299 break;
1300 case ICBTAG_FILE_TYPE_FIFO:
1301 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1302 break;
1303 case ICBTAG_FILE_TYPE_SOCKET:
1304 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1305 break;
1306 case ICBTAG_FILE_TYPE_SYMLINK:
1307 inode->i_data.a_ops = &udf_symlink_aops;
1308 inode->i_op = &udf_symlink_inode_operations;
1309 inode->i_mode = S_IFLNK | S_IRWXUGO;
1310 break;
1311 case ICBTAG_FILE_TYPE_MAIN:
1312 udf_debug("METADATA FILE-----\n");
1313 break;
1314 case ICBTAG_FILE_TYPE_MIRROR:
1315 udf_debug("METADATA MIRROR FILE-----\n");
1316 break;
1317 case ICBTAG_FILE_TYPE_BITMAP:
1318 udf_debug("METADATA BITMAP FILE-----\n");
1319 break;
1320 default:
1321 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown "
1322 "file type=%d\n", inode->i_ino,
1323 fe->icbTag.fileType);
1324 make_bad_inode(inode);
1325 return;
1327 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1328 struct deviceSpec *dsea =
1329 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1330 if (dsea) {
1331 init_special_inode(inode, inode->i_mode,
1332 MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1333 le32_to_cpu(dsea->minorDeviceIdent)));
1334 /* Developer ID ??? */
1335 } else
1336 make_bad_inode(inode);
1340 static int udf_alloc_i_data(struct inode *inode, size_t size)
1342 struct udf_inode_info *iinfo = UDF_I(inode);
1343 iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
1345 if (!iinfo->i_ext.i_data) {
1346 printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) "
1347 "no free memory\n", inode->i_ino);
1348 return -ENOMEM;
1351 return 0;
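/*
 * Editor's annotation (not part of the original source): UDF (ECMA-167)
 * permissions use five bits per class - others in bits 0-4, group in 5-9,
 * owner in 10-14 - with read/write/execute as the low three of each group,
 * hence the >> 2 and >> 4 shifts below. Worked example: owner rwx only is
 * 0x1C00, and 0x1C00 >> 4 == 0700 == S_IRWXU. The extra per-class "delete"
 * and "change attribute" bits have no POSIX equivalent and are preserved
 * separately by udf_update_inode() via the FE_PERM_* masks.
 */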
1354 static mode_t udf_convert_permissions(struct fileEntry *fe)
1356 mode_t mode;
1357 uint32_t permissions;
1358 uint32_t flags;
1360 permissions = le32_to_cpu(fe->permissions);
1361 flags = le16_to_cpu(fe->icbTag.flags);
1363 mode = ((permissions) & S_IRWXO) |
1364 ((permissions >> 2) & S_IRWXG) |
1365 ((permissions >> 4) & S_IRWXU) |
1366 ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1367 ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1368 ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1370 return mode;
1373 int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
1375 return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1378 static int udf_sync_inode(struct inode *inode)
1380 return udf_update_inode(inode, 1);
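/*
 * Editor's annotation (not part of the original source): udf_update_inode()
 * rebuilds the on-disk descriptor (USE, FE or EFE) in a single buffer and
 * then seals its tag: descCRCLength counts the bytes after the struct tag
 * header (descriptor body plus extended attributes plus allocation
 * descriptors), descCRC is crc_itu_t() over exactly that range, and
 * tagChecksum covers the tag header itself via udf_tag_checksum(). With
 * do_sync the buffer is written out synchronously and write errors are
 * reported as -EIO.
 */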
1383 static int udf_update_inode(struct inode *inode, int do_sync)
1385 struct buffer_head *bh = NULL;
1386 struct fileEntry *fe;
1387 struct extendedFileEntry *efe;
1388 uint32_t udfperms;
1389 uint16_t icbflags;
1390 uint16_t crclen;
1391 int err = 0;
1392 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1393 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1394 struct udf_inode_info *iinfo = UDF_I(inode);
1396 bh = udf_tgetblk(inode->i_sb,
1397 udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
1398 if (!bh) {
1399 udf_debug("getblk failure\n");
1400 return -ENOMEM;
1403 lock_buffer(bh);
1404 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1405 fe = (struct fileEntry *)bh->b_data;
1406 efe = (struct extendedFileEntry *)bh->b_data;
1408 if (iinfo->i_use) {
1409 struct unallocSpaceEntry *use =
1410 (struct unallocSpaceEntry *)bh->b_data;
1412 use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1413 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
1414 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1415 sizeof(struct unallocSpaceEntry));
1416 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1417 use->descTag.tagLocation =
1418 cpu_to_le32(iinfo->i_location.logicalBlockNum);
1419 crclen = sizeof(struct unallocSpaceEntry) +
1420 iinfo->i_lenAlloc - sizeof(struct tag);
1421 use->descTag.descCRCLength = cpu_to_le16(crclen);
1422 use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
1423 sizeof(struct tag),
1424 crclen));
1425 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1427 goto out;
1430 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1431 fe->uid = cpu_to_le32(-1);
1432 else
1433 fe->uid = cpu_to_le32(inode->i_uid);
1435 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1436 fe->gid = cpu_to_le32(-1);
1437 else
1438 fe->gid = cpu_to_le32(inode->i_gid);
1440 udfperms = ((inode->i_mode & S_IRWXO)) |
1441 ((inode->i_mode & S_IRWXG) << 2) |
1442 ((inode->i_mode & S_IRWXU) << 4);
1444 udfperms |= (le32_to_cpu(fe->permissions) &
1445 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1446 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1447 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1448 fe->permissions = cpu_to_le32(udfperms);
1450 if (S_ISDIR(inode->i_mode))
1451 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1452 else
1453 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1455 fe->informationLength = cpu_to_le64(inode->i_size);
1457 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1458 struct regid *eid;
1459 struct deviceSpec *dsea =
1460 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1461 if (!dsea) {
1462 dsea = (struct deviceSpec *)
1463 udf_add_extendedattr(inode,
1464 sizeof(struct deviceSpec) +
1465 sizeof(struct regid), 12, 0x3);
1466 dsea->attrType = cpu_to_le32(12);
1467 dsea->attrSubtype = 1;
1468 dsea->attrLength = cpu_to_le32(
1469 sizeof(struct deviceSpec) +
1470 sizeof(struct regid));
1471 dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
1473 eid = (struct regid *)dsea->impUse;
1474 memset(eid, 0, sizeof(struct regid));
1475 strcpy(eid->ident, UDF_ID_DEVELOPER);
1476 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1477 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1478 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1479 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1482 if (iinfo->i_efe == 0) {
1483 memcpy(bh->b_data + sizeof(struct fileEntry),
1484 iinfo->i_ext.i_data,
1485 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1486 fe->logicalBlocksRecorded = cpu_to_le64(
1487 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1488 (blocksize_bits - 9));
1490 udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
1491 udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
1492 udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
1493 memset(&(fe->impIdent), 0, sizeof(struct regid));
1494 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1495 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1496 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1497 fe->uniqueID = cpu_to_le64(iinfo->i_unique);
1498 fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1499 fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1500 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1501 crclen = sizeof(struct fileEntry);
1502 } else {
1503 memcpy(bh->b_data + sizeof(struct extendedFileEntry),
1504 iinfo->i_ext.i_data,
1505 inode->i_sb->s_blocksize -
1506 sizeof(struct extendedFileEntry));
1507 efe->objectSize = cpu_to_le64(inode->i_size);
1508 efe->logicalBlocksRecorded = cpu_to_le64(
1509 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1510 (blocksize_bits - 9));
1512 if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
1513 (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
1514 iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
1515 iinfo->i_crtime = inode->i_atime;
1517 if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
1518 (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
1519 iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
1520 iinfo->i_crtime = inode->i_mtime;
1522 if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
1523 (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
1524 iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
1525 iinfo->i_crtime = inode->i_ctime;
1527 udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
1528 udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
1529 udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
1530 udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
1532 memset(&(efe->impIdent), 0, sizeof(struct regid));
1533 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1534 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1535 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1536 efe->uniqueID = cpu_to_le64(iinfo->i_unique);
1537 efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1538 efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1539 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1540 crclen = sizeof(struct extendedFileEntry);
1542 if (iinfo->i_strat4096) {
1543 fe->icbTag.strategyType = cpu_to_le16(4096);
1544 fe->icbTag.strategyParameter = cpu_to_le16(1);
1545 fe->icbTag.numEntries = cpu_to_le16(2);
1546 } else {
1547 fe->icbTag.strategyType = cpu_to_le16(4);
1548 fe->icbTag.numEntries = cpu_to_le16(1);
1551 if (S_ISDIR(inode->i_mode))
1552 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1553 else if (S_ISREG(inode->i_mode))
1554 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1555 else if (S_ISLNK(inode->i_mode))
1556 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1557 else if (S_ISBLK(inode->i_mode))
1558 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1559 else if (S_ISCHR(inode->i_mode))
1560 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1561 else if (S_ISFIFO(inode->i_mode))
1562 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1563 else if (S_ISSOCK(inode->i_mode))
1564 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1566 icbflags = iinfo->i_alloc_type |
1567 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1568 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1569 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1570 (le16_to_cpu(fe->icbTag.flags) &
1571 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1572 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1574 fe->icbTag.flags = cpu_to_le16(icbflags);
1575 if (sbi->s_udfrev >= 0x0200)
1576 fe->descTag.descVersion = cpu_to_le16(3);
1577 else
1578 fe->descTag.descVersion = cpu_to_le16(2);
1579 fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
1580 fe->descTag.tagLocation = cpu_to_le32(
1581 iinfo->i_location.logicalBlockNum);
1582 crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
1583 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1584 fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
1585 crclen));
1586 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1588 out:
1589 set_buffer_uptodate(bh);
1590 unlock_buffer(bh);
1592 /* write the data blocks */
1593 mark_buffer_dirty(bh);
1594 if (do_sync) {
1595 sync_dirty_buffer(bh);
1596 if (buffer_write_io_error(bh)) {
1597 printk(KERN_WARNING "IO error syncing udf inode "
1598 "[%s:%08lx]\n", inode->i_sb->s_id,
1599 inode->i_ino);
1600 err = -EIO;
1603 brelse(bh);
1605 return err;
1608 struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
1610 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1611 struct inode *inode = iget_locked(sb, block);
1613 if (!inode)
1614 return NULL;
1616 if (inode->i_state & I_NEW) {
1617 memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
1618 __udf_read_inode(inode);
1619 unlock_new_inode(inode);
1622 if (is_bad_inode(inode))
1623 goto out_iput;
1625 if (ino->logicalBlockNum >= UDF_SB(sb)->
1626 s_partmaps[ino->partitionReferenceNum].s_partition_len) {
1627 udf_debug("block=%d, partition=%d out of range\n",
1628 ino->logicalBlockNum, ino->partitionReferenceNum);
1629 make_bad_inode(inode);
1630 goto out_iput;
1633 return inode;
1635 out_iput:
1636 iput(inode);
1637 return NULL;
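/*
 * Editor's annotation (not part of the original source): when the current
 * allocation-descriptor area cannot hold two more descriptors,
 * udf_add_aext() below spills into a new Allocation Extent Descriptor
 * block: it allocates a block, stamps it with a TAG_IDENT_AED tag, and
 * links it from the old area through a pseudo-extent of type
 * EXT_NEXT_EXTENT_ALLOCDECS. udf_next_aext() recognises that extent type
 * and follows the chain transparently, so callers only ever see real
 * extents.
 */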
1640 int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1641 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
1643 int adsize;
1644 struct short_ad *sad = NULL;
1645 struct long_ad *lad = NULL;
1646 struct allocExtDesc *aed;
1647 int8_t etype;
1648 uint8_t *ptr;
1649 struct udf_inode_info *iinfo = UDF_I(inode);
1651 if (!epos->bh)
1652 ptr = iinfo->i_ext.i_data + epos->offset -
1653 udf_file_entry_alloc_offset(inode) +
1654 iinfo->i_lenEAttr;
1655 else
1656 ptr = epos->bh->b_data + epos->offset;
1658 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1659 adsize = sizeof(struct short_ad);
1660 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1661 adsize = sizeof(struct long_ad);
1662 else
1663 return -1;
1665 if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
1666 unsigned char *sptr, *dptr;
1667 struct buffer_head *nbh;
1668 int err, loffset;
1669 struct kernel_lb_addr obloc = epos->block;
1671 epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1672 obloc.partitionReferenceNum,
1673 obloc.logicalBlockNum, &err);
1674 if (!epos->block.logicalBlockNum)
1675 return -1;
1676 nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1677 &epos->block,
1678 0));
1679 if (!nbh)
1680 return -1;
1681 lock_buffer(nbh);
1682 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1683 set_buffer_uptodate(nbh);
1684 unlock_buffer(nbh);
1685 mark_buffer_dirty_inode(nbh, inode);
1687 aed = (struct allocExtDesc *)(nbh->b_data);
1688 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1689 aed->previousAllocExtLocation =
1690 cpu_to_le32(obloc.logicalBlockNum);
1691 if (epos->offset + adsize > inode->i_sb->s_blocksize) {
1692 loffset = epos->offset;
1693 aed->lengthAllocDescs = cpu_to_le32(adsize);
1694 sptr = ptr - adsize;
1695 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1696 memcpy(dptr, sptr, adsize);
1697 epos->offset = sizeof(struct allocExtDesc) + adsize;
1698 } else {
1699 loffset = epos->offset + adsize;
1700 aed->lengthAllocDescs = cpu_to_le32(0);
1701 sptr = ptr;
1702 epos->offset = sizeof(struct allocExtDesc);
1704 if (epos->bh) {
1705 aed = (struct allocExtDesc *)epos->bh->b_data;
1706 le32_add_cpu(&aed->lengthAllocDescs, adsize);
1707 } else {
1708 iinfo->i_lenAlloc += adsize;
1709 mark_inode_dirty(inode);
1712 if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
1713 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1714 epos->block.logicalBlockNum, sizeof(struct tag));
1715 else
1716 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1717 epos->block.logicalBlockNum, sizeof(struct tag));
1718 switch (iinfo->i_alloc_type) {
1719 case ICBTAG_FLAG_AD_SHORT:
1720 sad = (struct short_ad *)sptr;
1721 sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1722 inode->i_sb->s_blocksize);
1723 sad->extPosition =
1724 cpu_to_le32(epos->block.logicalBlockNum);
1725 break;
1726 case ICBTAG_FLAG_AD_LONG:
1727 lad = (struct long_ad *)sptr;
1728 lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1729 inode->i_sb->s_blocksize);
1730 lad->extLocation = cpu_to_lelb(epos->block);
1731 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1732 break;
1734 if (epos->bh) {
1735 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1736 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1737 udf_update_tag(epos->bh->b_data, loffset);
1738 else
1739 udf_update_tag(epos->bh->b_data,
1740 sizeof(struct allocExtDesc));
1741 mark_buffer_dirty_inode(epos->bh, inode);
1742 brelse(epos->bh);
1743 } else {
1744 mark_inode_dirty(inode);
1746 epos->bh = nbh;
1749 etype = udf_write_aext(inode, epos, eloc, elen, inc);
1751 if (!epos->bh) {
1752 iinfo->i_lenAlloc += adsize;
1753 mark_inode_dirty(inode);
1754 } else {
1755 aed = (struct allocExtDesc *)epos->bh->b_data;
1756 le32_add_cpu(&aed->lengthAllocDescs, adsize);
1757 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1758 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1759 udf_update_tag(epos->bh->b_data,
1760 epos->offset + (inc ? 0 : adsize));
1761 else
1762 udf_update_tag(epos->bh->b_data,
1763 sizeof(struct allocExtDesc));
1764 mark_buffer_dirty_inode(epos->bh, inode);
1767 return etype;
1770 int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1771 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
1773 int adsize;
1774 uint8_t *ptr;
1775 struct short_ad *sad;
1776 struct long_ad *lad;
1777 struct udf_inode_info *iinfo = UDF_I(inode);
1779 if (!epos->bh)
1780 ptr = iinfo->i_ext.i_data + epos->offset -
1781 udf_file_entry_alloc_offset(inode) +
1782 iinfo->i_lenEAttr;
1783 else
1784 ptr = epos->bh->b_data + epos->offset;
1786 switch (iinfo->i_alloc_type) {
1787 case ICBTAG_FLAG_AD_SHORT:
1788 sad = (struct short_ad *)ptr;
1789 sad->extLength = cpu_to_le32(elen);
1790 sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
1791 adsize = sizeof(struct short_ad);
1792 break;
1793 case ICBTAG_FLAG_AD_LONG:
1794 lad = (struct long_ad *)ptr;
1795 lad->extLength = cpu_to_le32(elen);
1796 lad->extLocation = cpu_to_lelb(*eloc);
1797 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1798 adsize = sizeof(struct long_ad);
1799 break;
1800 default:
1801 return -1;
1804 if (epos->bh) {
1805 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1806 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
1807 struct allocExtDesc *aed =
1808 (struct allocExtDesc *)epos->bh->b_data;
1809 udf_update_tag(epos->bh->b_data,
1810 le32_to_cpu(aed->lengthAllocDescs) +
1811 sizeof(struct allocExtDesc));
1813 mark_buffer_dirty_inode(epos->bh, inode);
1814 } else {
1815 mark_inode_dirty(inode);
1818 if (inc)
1819 epos->offset += adsize;
1821 return (elen >> 30);
1824 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1825 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
1827 int8_t etype;
1829 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1830 (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
1831 int block;
1832 epos->block = *eloc;
1833 epos->offset = sizeof(struct allocExtDesc);
1834 brelse(epos->bh);
1835 block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
1836 epos->bh = udf_tread(inode->i_sb, block);
1837 if (!epos->bh) {
1838 udf_debug("reading block %d failed!\n", block);
1839 return -1;
1843 return etype;
1846 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1847 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
1849 int alen;
1850 int8_t etype;
1851 uint8_t *ptr;
1852 struct short_ad *sad;
1853 struct long_ad *lad;
1854 struct udf_inode_info *iinfo = UDF_I(inode);
1856 if (!epos->bh) {
1857 if (!epos->offset)
1858 epos->offset = udf_file_entry_alloc_offset(inode);
1859 ptr = iinfo->i_ext.i_data + epos->offset -
1860 udf_file_entry_alloc_offset(inode) +
1861 iinfo->i_lenEAttr;
1862 alen = udf_file_entry_alloc_offset(inode) +
1863 iinfo->i_lenAlloc;
1864 } else {
1865 if (!epos->offset)
1866 epos->offset = sizeof(struct allocExtDesc);
1867 ptr = epos->bh->b_data + epos->offset;
1868 alen = sizeof(struct allocExtDesc) +
1869 le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
1870 lengthAllocDescs);
1873 switch (iinfo->i_alloc_type) {
1874 case ICBTAG_FLAG_AD_SHORT:
1875 sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
1876 if (!sad)
1877 return -1;
1878 etype = le32_to_cpu(sad->extLength) >> 30;
1879 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1880 eloc->partitionReferenceNum =
1881 iinfo->i_location.partitionReferenceNum;
1882 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1883 break;
1884 case ICBTAG_FLAG_AD_LONG:
1885 lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
1886 if (!lad)
1887 return -1;
1888 etype = le32_to_cpu(lad->extLength) >> 30;
1889 *eloc = lelb_to_cpu(lad->extLocation);
1890 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1891 break;
1892 default:
1893 udf_debug("alloc_type = %d unsupported\n",
1894 iinfo->i_alloc_type);
1895 return -1;
1898 return etype;
1901 static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
1902 struct kernel_lb_addr neloc, uint32_t nelen)
1904 struct kernel_lb_addr oeloc;
1905 uint32_t oelen;
1906 int8_t etype;
1908 if (epos.bh)
1909 get_bh(epos.bh);
1911 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
1912 udf_write_aext(inode, &epos, &neloc, nelen, 1);
1913 neloc = oeloc;
1914 nelen = (etype << 30) | oelen;
1916 udf_add_aext(inode, &epos, &neloc, nelen, 1);
1917 brelse(epos.bh);
1919 return (nelen >> 30);
1922 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1923 struct kernel_lb_addr eloc, uint32_t elen)
1925 struct extent_position oepos;
1926 int adsize;
1927 int8_t etype;
1928 struct allocExtDesc *aed;
1929 struct udf_inode_info *iinfo;
1931 if (epos.bh) {
1932 get_bh(epos.bh);
1933 get_bh(epos.bh);
1936 iinfo = UDF_I(inode);
1937 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1938 adsize = sizeof(struct short_ad);
1939 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1940 adsize = sizeof(struct long_ad);
1941 else
1942 adsize = 0;
1944 oepos = epos;
1945 if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
1946 return -1;
1948 while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
1949 udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
1950 if (oepos.bh != epos.bh) {
1951 oepos.block = epos.block;
1952 brelse(oepos.bh);
1953 get_bh(epos.bh);
1954 oepos.bh = epos.bh;
1955 oepos.offset = epos.offset - adsize;
1958 memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
1959 elen = 0;
1961 if (epos.bh != oepos.bh) {
1962 udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
1963 udf_write_aext(inode, &oepos, &eloc, elen, 1);
1964 udf_write_aext(inode, &oepos, &eloc, elen, 1);
1965 if (!oepos.bh) {
1966 iinfo->i_lenAlloc -= (adsize * 2);
1967 mark_inode_dirty(inode);
1968 } else {
1969 aed = (struct allocExtDesc *)oepos.bh->b_data;
1970 le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
1971 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1972 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1973 udf_update_tag(oepos.bh->b_data,
1974 oepos.offset - (2 * adsize));
1975 else
1976 udf_update_tag(oepos.bh->b_data,
1977 sizeof(struct allocExtDesc));
1978 mark_buffer_dirty_inode(oepos.bh, inode);
1980 } else {
1981 udf_write_aext(inode, &oepos, &eloc, elen, 1);
1982 if (!oepos.bh) {
1983 iinfo->i_lenAlloc -= adsize;
1984 mark_inode_dirty(inode);
1985 } else {
1986 aed = (struct allocExtDesc *)oepos.bh->b_data;
1987 le32_add_cpu(&aed->lengthAllocDescs, -adsize);
1988 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1989 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1990 udf_update_tag(oepos.bh->b_data,
1991 epos.offset - adsize);
1992 else
1993 udf_update_tag(oepos.bh->b_data,
1994 sizeof(struct allocExtDesc));
1995 mark_buffer_dirty_inode(oepos.bh, inode);
1999 brelse(epos.bh);
2000 brelse(oepos.bh);
2002 return (elen >> 30);
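/*
 * Editor's annotation (not part of the original source): inode_bmap() walks
 * the extent list, summing extent lengths until the running byte count
 * passes the requested block, and returns the extent type plus the block
 * offset inside that extent. udf_block_map() then resolves it roughly as
 *
 *	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
 *	    (EXT_RECORDED_ALLOCATED >> 30))
 *		ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
 *
 * so holes and not-yet-recorded extents map to 0.
 */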
2005 int8_t inode_bmap(struct inode *inode, sector_t block,
2006 struct extent_position *pos, struct kernel_lb_addr *eloc,
2007 uint32_t *elen, sector_t *offset)
2009 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
2010 loff_t lbcount = 0, bcount =
2011 (loff_t) block << blocksize_bits;
2012 int8_t etype;
2013 struct udf_inode_info *iinfo;
2015 iinfo = UDF_I(inode);
2016 pos->offset = 0;
2017 pos->block = iinfo->i_location;
2018 pos->bh = NULL;
2019 *elen = 0;
2021 do {
2022 etype = udf_next_aext(inode, pos, eloc, elen, 1);
2023 if (etype == -1) {
2024 *offset = (bcount - lbcount) >> blocksize_bits;
2025 iinfo->i_lenExtents = lbcount;
2026 return -1;
2028 lbcount += *elen;
2029 } while (lbcount <= bcount);
2031 *offset = (bcount + *elen - lbcount) >> blocksize_bits;
2033 return etype;
2036 long udf_block_map(struct inode *inode, sector_t block)
2038 struct kernel_lb_addr eloc;
2039 uint32_t elen;
2040 sector_t offset;
2041 struct extent_position epos = {};
2042 int ret;
2044 down_read(&UDF_I(inode)->i_data_sem);
2046 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
2047 (EXT_RECORDED_ALLOCATED >> 30))
2048 ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
2049 else
2050 ret = 0;
2052 up_read(&UDF_I(inode)->i_data_sem);
2053 brelse(epos.bh);
2055 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2056 return udf_fixed_to_variable(ret);
2057 else
2058 return ret;