/*
 * linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}
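/*
 * Map a block offset within an eight-slot extent record to the on-disk
 * allocation block backing it; returns 0 if the offset is not covered
 * by the record.
 */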
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}
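/*
 * Return the allocation block that follows the last block described by
 * the record; used below as the goal when allocating further blocks.
 */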
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}
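/*
 * Write the cached extent record back to the extents overflow tree:
 * insert a new record if it has never been on disk (HFSPLUS_EXT_NEW),
 * otherwise overwrite the existing one.  Caller holds extents_lock.
 */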
static int __hfsplus_ext_write_extent(struct inode *inode,
				      struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return res;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return res;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode.  Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);

	return 0;
}
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
	int res = 0;

	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		res = __hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return res;
}
int hfsplus_ext_write_extent(struct inode *inode)
{
	int res;

	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	res = hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);

	return res;
}
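/*
 * Look up the extent record for (cnid, block, fork type) in the extents
 * tree and copy it into 'extent'; -ENOENT if no record covers the block.
 */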
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
		sizeof(hfsplus_extent_rec));
	return 0;
}
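/*
 * Flush a dirty cached extent record, then load the record covering
 * 'block' into the inode's cache; the cache is invalidated on failure.
 */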
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
					     struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY) {
		res = __hfsplus_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks =
			hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}
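/*
 * Make sure the inode's cached extent record covers 'block', reading it
 * from the extents tree if necessary.
 */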
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	if (!res) {
		res = __hfsplus_ext_cache_extent(&fd, inode, block);
		hfs_find_exit(&fd);
	}
	return res;
}
/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	sector_t sector;
	int was_dirty = 0;

	/* Convert inode block to disk allocation block */
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (iblock > hip->fs_blocks || !create)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode, false);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree.  In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (long long)iblock, dblock);

	mask = (1 << sbi->fs_shift) - 1;
	sector = ((sector_t)dblock << sbi->fs_shift) +
		 sbi->blockoffset + (iblock & mask);
	map_bh(bh_result, sb, sector);

	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}
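/* Dump all eight start:count pairs of an extent record for debugging */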
static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	hfs_dbg(EXTENT, " ");
	for (i = 0; i < 8; i++)
		hfs_dbg_cont(EXTENT, " %u:%u",
			     be32_to_cpu(extent[i].start_block),
			     be32_to_cpu(extent[i].block_count));
	hfs_dbg_cont(EXTENT, "\n");
}
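/*
 * Append 'block_count' newly allocated blocks starting at 'alloc_block'
 * to an extent record that currently ends at logical 'offset': the last
 * extent is grown when the new blocks are contiguous with it, otherwise
 * a fresh slot is started; -ENOSPC once all eight slots are in use.
 */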
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}
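/*
 * Free 'block_nr' allocation blocks from the tail of the first 'offset'
 * blocks described by an extent record, clearing or shrinking slots from
 * the last used one backwards; returns the last error seen while freeing.
 */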
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;
	int err = 0;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			err = hfsplus_block_free(sb, start, count);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			err = hfsplus_block_free(sb, start + count, block_nr);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i) {
			/*
			 * Try to free all extents and
			 * return only last error
			 */
			return err;
		}
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}
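/*
 * Free every allocation block of a fork: first the eight extents stored
 * in the catalog record, then any overflow records in the extents tree,
 * removing each overflow record once its blocks have been released.
 */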
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
		      struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfsplus_free_extents(sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}
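/*
 * Allocate up to clump_blocks more allocation blocks for the file,
 * preferring blocks contiguous with its current last extent.  The new
 * blocks are appended to the in-inode extents or to the cached overflow
 * record, or a fresh overflow record is started if neither has room.
 */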
int hfsplus_file_extend(struct inode *inode, bool zeroout)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		/* extend alloc file */
		pr_err("extend alloc file! (%llu,%u,%u)\n",
		       sbi->alloc_file->i_size * 8,
		       sbi->total_blocks, sbi->free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&hip->extents_lock);
	if (hip->alloc_blocks == hip->first_blocks)
		goal = hfsplus_ext_lastblock(hip->first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(hip->cached_extents);
	}

	len = hip->clump_blocks;
	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
	if (start >= sbi->total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	if (zeroout) {
		res = sb_issue_zeroout(sb, start, len, GFP_NOFS);
		if (res)
			goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

	if (hip->alloc_blocks <= hip->first_blocks) {
		if (!hip->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			hip->first_extents[0].start_block = cpu_to_be32(start);
			hip->first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(hip->first_extents,
						 hip->alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(hip->cached_extents,
					 hip->alloc_blocks - hip->cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(hip->cached_extents);
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			hip->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	if (!res) {
		hip->alloc_blocks += len;
		mutex_unlock(&hip->extents_lock);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
		return 0;
	}
	mutex_unlock(&hip->extents_lock);
	return res;

insert_extent:
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfsplus_ext_write_extent_locked(inode);
	if (res)
		goto out;

	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_extents[0].start_block = cpu_to_be32(start);
	hip->cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(hip->cached_extents);
	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
	hip->cached_start = hip->alloc_blocks;
	hip->cached_blocks = len;

	res = 0;
	goto out;
}
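/*
 * Shrink or grow the fork to match i_size: growing only dirties the
 * inode via a zero-length write, shrinking frees whole allocation
 * blocks from the end and drops overflow records that become empty.
 */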
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
		inode->i_ino, (long long)hip->phys_size, inode->i_size);

	if (inode->i_size > hip->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		loff_t size = inode->i_size;

		res = pagecache_write_begin(NULL, mapping, size, 0,
					    AOP_FLAG_UNINTERRUPTIBLE,
					    &page, &fsdata);
		if (res)
			return;
		res = pagecache_write_end(NULL, mapping, size,
			0, 0, page, fsdata);
		if (res < 0)
			return;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == hip->phys_size)
		return;

	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;

	mutex_lock(&hip->extents_lock);

	alloc_cnt = hip->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out_unlock;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		/* XXX: We lack error handling of hfsplus_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == hip->first_blocks) {
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = hip->cached_start;
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);

	hip->alloc_blocks = blk_cnt;
out_unlock:
	mutex_unlock(&hip->extents_lock);
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
			sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}