/*
 * Generic part of the minix block-mapping code. This file is #included
 * by itree_v1.c and itree_v2.c, which first define block_t, DEPTH,
 * DIRECT, i_data(), block_to_cpu()/cpu_to_block() and block_to_path().
 */

typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;

static rwlock_t pointers_lock = RW_LOCK_UNLOCKED;

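/*
 * Record one step of a lookup: p points at the on-disk pointer slot
 * (in an indirect block, or in the inode's i_data), key caches the
 * value read from it, and bh pins the buffer that slot lives in
 * (NULL when the slot is in the inode itself).
 */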
static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

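/*
 * A chain is valid as long as every cached key still matches the slot
 * it was read from; a mismatch means truncate modified the tree under
 * us and the chain must be dropped and rebuilt.
 */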
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

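/* One past the last pointer slot of an indirect block. */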
static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char *)bh->b_data + BLOCK_SIZE);
}

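/*
 * Walk the indirect chain for a block, filling chain[] one level per
 * iteration. Returns NULL if the whole path exists; otherwise the last
 * valid entry, with *err 0 for a hole, -EIO for a failed read, or
 * -EAGAIN if the chain changed under us. pointers_lock covers only the
 * verify-and-extend step, never the sleeping sb_bread().
 */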
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

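/*
 * Build a detached branch of num blocks: each new indirect block is
 * zeroed and pointed at its child before anything is reachable from the
 * tree, so no locking is needed yet. On failure, everything allocated
 * so far is released and -ENOSPC is returned.
 */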
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		lock_buffer(bh);
		memset(bh->b_data, 0, BLOCK_SIZE);
		branch[n].bh = bh;
		branch[n].p = (block_t *)bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}

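/*
 * Graft the detached branch onto the tree: under the write lock,
 * re-verify the chain, check the splice point is still vacant and store
 * the new key. Everything past the unlock is ordinary housekeeping.
 */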
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME;

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}

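/*
 * The ->get_block() work proper: map a file block to a device block,
 * allocating and splicing a missing branch when create is set. The
 * -EAGAIN path (lost a race with truncate) drops the chain and retries.
 */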
static inline int get_block(struct inode * inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

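/*
 * Find the top of the subtree that truncate can free wholesale. Returns
 * the deepest chain entry that must survive (shared with blocks that
 * stay); *top gets the root of the doomed subtree, already cleared from
 * its parent under pointers_lock so no reader can still reach it.
 */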
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}

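/* Free a run of data blocks, clearing their pointer slots. */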
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}

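/*
 * Recursively free a range of subtrees lying depth levels above the
 * data blocks. Freed indirect blocks are bforget()ed so their stale
 * contents are never written back.
 */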
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t *)bh->b_data,
					block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}

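/*
 * Cut the block tree down to the new i_size: free the tail of the
 * partially used branch located by find_shared(), then every wholly
 * unused subtree hanging off the inode.
 */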
static inline void truncate (struct inode * inode)
{
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + BLOCK_SIZE-1) >> 10;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
}

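/*
 * Blocks (data plus indirection) needed to hold size bytes: each pass
 * of the loop adds one more level of indirect blocks, rounding up by
 * one block per BLOCK_SIZE/sizeof(block_t) pointers.
 */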
static inline unsigned nblocks(loff_t size)
{
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += BLOCK_SIZE/sizeof(block_t) - 1;
		blocks /= BLOCK_SIZE/sizeof(block_t);
		res += blocks;
		direct = 1;
	}
	return res;
}