// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}
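
/*
 * Fill in an extents-tree search key for the given file (cnid),
 * fork type and allocation-block offset within the fork.
 */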
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}
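
/*
 * Map a record-relative allocation-block offset to an on-disk block
 * number; returns 0 if the offset lies beyond the record's extents.
 */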
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}
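
/* Total number of allocation blocks covered by an extent record */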
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}
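
/*
 * Return the block number just past the last allocated block of the
 * record, used as the goal for the next allocation.
 */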
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}
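
/*
 * Write the cached extent record back into the extents tree, inserting
 * a new record if it is not there yet. Caller must hold extents_lock.
 */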
static int __hfsplus_ext_write_extent(struct inode *inode,
				      struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return res;
		/* Fail early and avoid ENOSPC during the btree operation */
		res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
		if (res)
			return res;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return res;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode. Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);

	return 0;
}
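
/*
 * Flush the cached extent record if it is dirty.
 * Caller must hold extents_lock.
 */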
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
	int res = 0;

	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		res = __hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return res;
}

int hfsplus_ext_write_extent(struct inode *inode)
{
	int res;

	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	res = hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);

	return res;
}
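
/*
 * Look up the extent record covering @block of the given file and
 * fork in the extents tree and read it into @extent.
 */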
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
		sizeof(hfsplus_extent_rec));
	return 0;
}
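
/*
 * Make the extent record covering @block the cached one, writing
 * back the previously cached record first if it is dirty.
 * Caller must hold extents_lock.
 */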
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
					     struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY) {
		res = __hfsplus_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks =
			hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}
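
/*
 * Ensure the cached extent record covers @block, looking it up in
 * the extents tree if necessary.
 */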
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	if (!res) {
		res = __hfsplus_ext_cache_extent(&fd, inode, block);
		hfs_find_exit(&fd);
	}
	return res;
}

/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	sector_t sector;
	int was_dirty = 0;

	/* Convert inode block to disk allocation block */
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (!create)
			return 0;
		if (iblock > hip->fs_blocks)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode, false);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree. In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (long long)iblock, dblock);

	mask = (1 << sbi->fs_shift) - 1;
	sector = ((sector_t)dblock << sbi->fs_shift) +
		 sbi->blockoffset + (iblock & mask);
	map_bh(bh_result, sb, sector);

	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}
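
/* Print the eight extents of a record via the debug facility */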
static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	hfs_dbg(EXTENT, "   ");
	for (i = 0; i < 8; i++)
		hfs_dbg_cont(EXTENT, " %u:%u",
			     be32_to_cpu(extent[i].start_block),
			     be32_to_cpu(extent[i].block_count));
	hfs_dbg_cont(EXTENT, "\n");
}
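
/*
 * Append a newly allocated run of blocks at record-relative @offset:
 * merge it into the last extent if contiguous, otherwise start a new
 * extent; -ENOSPC if the record is full.
 */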
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}
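
/*
 * Free @block_nr blocks from the tail of an extent record, where
 * @offset is the record-relative offset of the current end of
 * allocation.
 */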
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;
	int err = 0;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			err = hfsplus_block_free(sb, start, count);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			err = hfsplus_block_free(sb, start + count, block_nr);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i) {
			/*
			 * Try to free all extents and
			 * return only last error
			 */
			return err;
		}
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}
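
/*
 * Free all blocks of a fork: the eight extents stored inline in the
 * catalog record plus any overflow records in the extents tree.
 */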
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
		struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfsplus_free_extents(sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}
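
/*
 * Allocate another clump of blocks for @inode, preferably contiguous
 * with its current last extent, and record it inline in the inode or
 * in the extents overflow tree.
 */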
int hfsplus_file_extend(struct inode *inode, bool zeroout)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		/* extend alloc file */
		pr_err("extend alloc file! (%llu,%u,%u)\n",
		       sbi->alloc_file->i_size * 8,
		       sbi->total_blocks, sbi->free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&hip->extents_lock);
	if (hip->alloc_blocks == hip->first_blocks)
		goal = hfsplus_ext_lastblock(hip->first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(hip->cached_extents);
	}

	len = hip->clump_blocks;
	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
	if (start >= sbi->total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	if (zeroout) {
		res = sb_issue_zeroout(sb, start, len, GFP_NOFS);
		if (res)
			goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

	if (hip->alloc_blocks <= hip->first_blocks) {
		if (!hip->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			hip->first_extents[0].start_block = cpu_to_be32(start);
			hip->first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(hip->first_extents,
						 hip->alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(hip->cached_extents,
					 hip->alloc_blocks - hip->cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(hip->cached_extents);
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			hip->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	if (!res) {
		hip->alloc_blocks += len;
		mutex_unlock(&hip->extents_lock);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
		return 0;
	}
	mutex_unlock(&hip->extents_lock);
	return res;

insert_extent:
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfsplus_ext_write_extent_locked(inode);
	if (res)
		goto out;

	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_extents[0].start_block = cpu_to_be32(start);
	hip->cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(hip->cached_extents);
	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
	hip->cached_start = hip->alloc_blocks;
	hip->cached_blocks = len;

	res = 0;
	goto out;
}
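
/*
 * Adjust the on-disk allocation to match inode->i_size: growth just
 * dirties the new last page, shrinking frees the surplus allocation
 * blocks from the inline record and any overflow records.
 */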
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
		inode->i_ino, (long long)hip->phys_size, inode->i_size);

	if (inode->i_size > hip->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		loff_t size = inode->i_size;

		res = pagecache_write_begin(NULL, mapping, size, 0, 0,
					    &page, &fsdata);
		if (res)
			return;
		res = pagecache_write_end(NULL, mapping, size,
					  0, 0, page, fsdata);
		if (res < 0)
			return;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == hip->phys_size)
		return;

	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;

	mutex_lock(&hip->extents_lock);

	alloc_cnt = hip->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out_unlock;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		/* XXX: We lack error handling of hfsplus_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == hip->first_blocks) {
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = hip->cached_start;
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);

	hip->alloc_blocks = blk_cnt;
out_unlock:
	mutex_unlock(&hip->extents_lock);
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
			sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}