// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/sysv/itree.c
 *
 *  Handling of indirect blocks' trees.
 *  AV, Sep--Dec 2000
 */

#include <linux/buffer_head.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/string.h>
#include "sysv.h"

enum {DIRECT = 10, DEPTH = 4};	/* Have triple indirect */

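/*
 * Illustrative geometry (an assumption for the example, not read from
 * the superblock): with 1024-byte blocks and 4-byte zone pointers an
 * indirect block holds 256 pointers, so blocks 0-9 are direct, blocks
 * 10-265 go through the single indirect block, the next 256*256 go
 * through the double indirect one, and the rest through the triple.
 */
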
static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	mark_buffer_dirty_inode(bh, inode);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
}

static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
{
	struct super_block *sb = inode->i_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	int ptrs_bits = sbi->s_ind_per_block_bits;
	unsigned long	indirect_blocks = sbi->s_ind_per_block,
			double_blocks = sbi->s_ind_per_block_2;
	int n = 0;

	if (block < 0) {
		printk("sysv_block_map: block < 0\n");
	} else if (block < DIRECT) {
		offsets[n++] = block;
	} else if ( (block -= DIRECT) < indirect_blocks) {
		offsets[n++] = DIRECT;
		offsets[n++] = block;
	} else if ((block -= indirect_blocks) < double_blocks) {
		offsets[n++] = DIRECT+1;
		offsets[n++] = block >> ptrs_bits;
		offsets[n++] = block & (indirect_blocks - 1);
	} else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) {
		offsets[n++] = DIRECT+2;
		offsets[n++] = block >> (ptrs_bits * 2);
		offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1);
		offsets[n++] = block & (indirect_blocks - 1);
	} else {
		/* nothing */;
	}
	return n;
}

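/*
 * Worked example (same 256-pointers-per-block assumption as above):
 * for logical block 300 we get 300 - 10 - 256 = 34 into the double
 * indirect range, so block_to_path() fills offsets[] with
 * { DIRECT+1, 34 >> 8, 34 & 255 } = { 11, 0, 34 } and returns 3.
 */
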
static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr)
{
	return sbi->s_block_base + fs32_to_cpu(sbi, nr);
}

typedef struct {
	sysv_zone_t	*p;
	sysv_zone_t	key;
	struct buffer_head *bh;
} Indirect;

static DEFINE_RWLOCK(pointers_lock);

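/*
 * A chain of Indirects records, for each level walked so far, where
 * the zone pointer lives (p), the value it had when read (key) and
 * the buffer holding it (bh, NULL for pointers embedded in the
 * inode).  verify_chain() re-checks *p against key under
 * pointers_lock to detect a concurrent truncate.
 */
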
static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

static inline sysv_zone_t *block_end(struct buffer_head *bh)
{
	return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
}

static Indirect *get_branch(struct inode *inode,
			    int depth,
			    int offsets[],
			    Indirect chain[],
			    int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	add_chain(chain, NULL, SYSV_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		int block = block_to_cpu(SYSV_SB(sb), p->key);
		bh = sb_bread(sb, block);
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

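/*
 * Note the return convention: get_branch() yields NULL when the whole
 * path exists (the answer is then chain[depth-1].key), otherwise the
 * first Indirect it could not follow, with *err left at 0 for a hole,
 * -EIO for a failed read or -EAGAIN when the chain changed under us.
 */
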
static int alloc_branch(struct inode *inode,
			int num,
			int *offsets,
			Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0;
	int i;

	branch[0].key = sysv_new_block(inode->i_sb);
	if (branch[0].key) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		int parent;
		/* Allocate the next block */
		branch[n].key = sysv_new_block(inode->i_sb);
		if (!branch[n].key)
			break;
		/*
		 * Get buffer_head for parent block, zero it out and set
		 * the pointer to new one, then send parent to disk.
		 */
		parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key);
		bh = sb_getblk(inode->i_sb, parent);
		if (!bh) {
			sysv_free_block(inode->i_sb, branch[n].key);
			break;
		}
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].bh = bh;
		branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		dirty_indirect(bh, inode);
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		sysv_free_block(inode->i_sb, branch[i].key);
	return -ENOSPC;
}

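/*
 * The branch built by alloc_branch() is not yet reachable from the
 * inode; splice_branch() below publishes it, which is why a failure
 * here can undo the allocations without taking pointers_lock.
 */
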
static inline int splice_branch(struct inode *inode,
				Indirect chain[],
				Indirect *where,
				int num)
{
	int i;

	/* Verify that place we are splicing to is still there and vacant */
	write_lock(&pointers_lock);
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;
	*where->p = where->key;
	write_unlock(&pointers_lock);

	inode_set_ctime_current(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		dirty_indirect(where->bh, inode);

	if (IS_SYNC(inode))
		sysv_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		sysv_free_block(inode->i_sb, where[i].key);
	return -EAGAIN;
}

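/*
 * get_block() glues the helpers together: map the logical block to a
 * path, walk it with get_branch() and, when asked to create, grow the
 * missing tail with alloc_branch() + splice_branch().  An -EAGAIN
 * from a losing race simply sends us back to the reread label.
 */
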
static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	struct super_block *sb = inode->i_sb;
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, iblock, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh_result, sb, block_to_cpu(SYSV_SB(sb),
					chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh_result);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[],
				Indirect chain[],
				sysv_zone_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p=partial; p>chain && all_zeroes((sysv_zone_t*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

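/*
 * After find_shared() returns, *top holds the zone number of the
 * detached subtree (0 if nothing was detached) and the entries from
 * chain+1 up to the returned Indirect still hold their buffers, so
 * the caller can trim the tails of the surviving indirect blocks.
 */
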
static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
{
	for ( ; p < q ; p++) {
		sysv_zone_t nr = *p;
		if (nr) {
			*p = 0;
			sysv_free_block(inode->i_sb, nr);
			mark_inode_dirty(inode);
		}
	}
}

static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
{
	struct buffer_head * bh;
	struct super_block *sb = inode->i_sb;

	if (depth--) {
		for ( ; p < q ; p++) {
			int block;
			sysv_zone_t nr = *p;
			if (!nr)
				continue;
			*p = 0;
			block = block_to_cpu(SYSV_SB(sb), nr);
			bh = sb_bread(sb, block);
			if (!bh)
				continue;
			free_branches(inode, (sysv_zone_t*)bh->b_data,
					block_end(bh), depth);
			bforget(bh);
			sysv_free_block(sb, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

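/*
 * free_branches() recurses down the tree: while depth is non-zero,
 * p..q point at indirect blocks whose contents are freed first; at
 * depth zero they point at data blocks and free_data() finishes up.
 */
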
void sysv_truncate (struct inode * inode)
{
	sysv_zone_t *i_data = SYSV_I(inode)->i_data;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	sysv_zone_t nr = 0;
	int n;
	long iblock;
	unsigned blocksize;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;

	blocksize = inode->i_sb->s_blocksize;
	iblock = (inode->i_size + blocksize-1)
					>> inode->i_sb->s_blocksize_bits;

	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (n == 0)
		return;

	if (n == 1) {
		free_data(inode, i_data+offsets[0], i_data + DIRECT);
		goto do_indirects;
	}

	partial = find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (already detached) */
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			dirty_indirect(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		dirty_indirect(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees (== subtrees deeper than ours) */
	while (n < DEPTH) {
		nr = i_data[DIRECT + n - 1];
		if (nr) {
			i_data[DIRECT + n - 1] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, n);
		}
		n++;
	}
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (IS_SYNC(inode))
		sysv_sync_inode (inode);
	else
		mark_inode_dirty(inode);
}

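/*
 * So truncation runs in three phases: trim the tail of the direct
 * zones, detach and free the partially shared branch located by
 * find_shared(), then drop every deeper subtree that lies wholly
 * beyond the new end of file.
 */
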
static unsigned sysv_nblocks(struct super_block *s, loff_t size)
{
	struct sysv_sb_info *sbi = SYSV_SB(s);
	int ptrs_bits = sbi->s_ind_per_block_bits;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + s->s_blocksize - 1) >> s->s_blocksize_bits;
	res = blocks;
	while (--i && blocks > direct) {
		blocks = ((blocks - direct - 1) >> ptrs_bits) + 1;
		res += blocks;
		direct = 1;
	}
	return res;
}

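/*
 * Worked example (again assuming 256 pointers per indirect block):
 * a 1024-block file uses 1 single-indirect block plus a double
 * indirect root with 3 leaf blocks, so the loop above accumulates
 * 1024 + 4 + 1 = 1029.
 */
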
int sysv_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct super_block *s = path->dentry->d_sb;
	generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry),
			 stat);
	stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
	stat->blksize = s->s_blocksize;
	return 0;
}

static int sysv_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, get_block);
}

static int sysv_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, get_block);
}

int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	return __block_write_begin(folio, pos, len, get_block);
}

static void sysv_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		sysv_truncate(inode);
	}
}

static int sysv_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct folio **foliop, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, foliop, get_block);
	if (unlikely(ret))
		sysv_write_failed(mapping, pos + len);

	return ret;
}

static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,get_block);
}

const struct address_space_operations sysv_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = sysv_read_folio,
	.writepages = sysv_writepages,
	.write_begin = sysv_write_begin,
	.write_end = generic_write_end,
	.migrate_folio = buffer_migrate_folio,
	.bmap = sysv_bmap
};