// SPDX-License-Identifier: GPL-2.0
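/*
 * Generic part of the minix indirect-block ("itree") code.  block_t,
 * DEPTH, DIRECT and the block_to_cpu()/cpu_to_block() helpers are
 * expected to be provided by the file that includes this one.
 *
 * An Indirect records one step of a lookup chain: the pointer slot that
 * was followed (p), the block number read from it (key), and the buffer
 * that slot lives in (bh is NULL for slots held in the inode itself).
 */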
typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;
static DEFINE_RWLOCK(pointers_lock);
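/*
 * Record one link of the chain: remember the slot we followed, the key
 * read from it, and the buffer (if any) that holds the slot.
 */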
static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
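/*
 * Re-check that every link from 'from' to 'to' still matches what is on
 * disk; callers use this under pointers_lock to detect a concurrent
 * truncate that raced with reading an indirect block.
 */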
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
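/* Return a pointer just past the last block_t slot in this buffer. */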
static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char*)bh->b_data + bh->b_size);
}
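/*
 * Walk the chain of indirect blocks described by offsets[], filling
 * chain[] as we go.  Returns NULL when the full depth was resolved, or a
 * pointer to the last valid (incomplete) link otherwise, with *err set
 * to 0, -EAGAIN (chain changed under us) or -EIO (read failure).
 */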
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
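/*
 * Allocate the missing blocks for a branch: obtain num new blocks and
 * link them together in branch[], zeroing each new indirect block and
 * seeding it with the key of its child.  On failure, everything that was
 * allocated here is released again.
 */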
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t *) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}
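/*
 * Splice a freshly allocated branch onto the inode's tree.  The actual
 * splice is a single store done under pointers_lock; if the splice point
 * has changed or is no longer vacant, the new branch is thrown away.
 */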
static inline int splice_branch(struct inode *inode,
			     Indirect chain[DEPTH],
			     Indirect *where,
			     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = current_time(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}
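/*
 * Map a file block to a device block for the address_space operations:
 * look the block up and, if 'create' is set, allocate and splice in any
 * missing indirect blocks along the way.
 */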
static int get_block(struct inode * inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}
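/* True if every block_t slot in [p, q) is zero. */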
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
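/*
 * Locate the point where the subtree being truncated branches off from
 * the part of the tree we keep.  The root of the detached subtree (if
 * any) is returned through *top so the caller can free it; the return
 * value is the deepest chain entry that is still shared.
 */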
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
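/* Free a contiguous range of data blocks and clear their slots. */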
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}
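/*
 * Recursively free an indirect subtree: at depth > 0 each non-zero slot
 * names an indirect block whose children are freed first, then the block
 * itself; at depth 0 this degenerates to free_data().
 */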
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t*)bh->b_data,
					block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}
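/*
 * Truncate the block tree to the current i_size: zero the tail of the
 * last partial block, free the unused direct slots or prune the shared
 * indirect branch, then kill any remaining whole subtrees.
 */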
static inline void truncate (struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
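/*
 * Number of filesystem blocks needed to store 'size' bytes, counting the
 * indirect blocks on each level of the tree as well as the data blocks.
 */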
static inline unsigned nblocks(loff_t size, struct super_block *sb)
{
	int k = sb->s_blocksize_bits - 10;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += sb->s_blocksize/sizeof(block_t) - 1;
		blocks /= sb->s_blocksize/sizeof(block_t);