typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;
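
/*
 * Protects the block pointers in the inode and in indirect blocks
 * while they are inspected (verify_chain) or updated (splice_branch,
 * find_shared).
 */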
static DEFINE_RWLOCK(pointers_lock);
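
/* Record one link of the chain: where the pointer lives and its value. */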
static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
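
/*
 * Re-check that every link we cached still matches the pointer it was
 * read from; a concurrent truncate may have changed it under us.
 */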
static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
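
/* First address past the data of an indirect block. */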
static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char*)bh->b_data + bh->b_size);
}
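
/*
 * Walk the chain of indirect blocks for the given path (offsets),
 * filling in chain[] as we go.  Returns NULL when the full depth was
 * resolved, otherwise a pointer to the last valid link, with *err set
 * to 0 (hole), -EAGAIN (chain changed under us) or -EIO (read failure).
 */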
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
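
/*
 * Allocate num blocks and chain all but the first into a detached
 * branch; the first key is left for splice_branch() to connect.  On
 * failure everything allocated so far is released.
 */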
static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);

	branch[0].key = cpu_to_block(parent);
	if (parent) for (n = 1; n < num; n++) {
		struct buffer_head *bh;
		/* Allocate the next block */
		int nr = minix_new_block(inode);
		if (!nr)
			break;
		branch[n].key = cpu_to_block(nr);
		bh = sb_getblk(inode->i_sb, parent);
		if (!bh) {
			minix_free_block(inode, nr);
			break;
		}
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		branch[n].bh = bh;
		branch[n].p = (block_t*) bh->b_data + offsets[n];
		*branch[n].p = branch[n].key;
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		parent = nr;
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < n; i++)
		minix_free_block(inode, block_to_cpu(branch[i].key));
	return -ENOSPC;
}
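
/*
 * Atomically connect the branch built by alloc_branch() to the place
 * found by get_branch().  Fails with -EAGAIN if the splice point
 * changed or got filled while we were allocating.
 */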
static inline int splice_branch(struct inode *inode,
			     Indirect chain[DEPTH],
			     Indirect *where,
			     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME_SEC;

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}
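
/*
 * Map a file block to a device block, allocating any missing links in
 * the chain when @create is set.  On success the result is stored in
 * @bh via map_bh().
 */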
static inline int get_block(struct inode * inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}
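
/* True if no pointer in [p, q) is non-zero. */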
static inline int all_zeroes(block_t *p, block_t *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}
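
/*
 * Find the indirect block shared between the part of the tree we keep
 * and the part being truncated, and detach the doomed subtree by
 * zeroing the shared pointer (its old value is returned via *top)
 * under pointers_lock.
 */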
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	for (p = partial; p > chain && all_zeroes((block_t*)p->bh->b_data, p->p); p--)
		;
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
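
/* Free a range of data-block pointers, zeroing each slot first. */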
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
{
	unsigned long nr;

	for ( ; p < q ; p++) {
		nr = block_to_cpu(*p);
		if (nr) {
			*p = 0;
			minix_free_block(inode, nr);
		}
	}
}
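
/*
 * Recursively free a range of branches: at depth > 0 each pointer
 * names an indirect block whose contents are freed first; at depth 0
 * the pointers are plain data blocks.
 */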
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
{
	struct buffer_head * bh;
	unsigned long nr;

	if (depth--) {
		for ( ; p < q ; p++) {
			nr = block_to_cpu(*p);
			if (!nr)
				continue;
			*p = 0;
			bh = sb_bread(inode->i_sb, nr);
			if (!bh)
				continue;
			free_branches(inode, (block_t*)bh->b_data,
				      block_end(bh), depth);
			bforget(bh);
			minix_free_block(inode, nr);
			mark_inode_dirty(inode);
		}
	} else
		free_data(inode, p, q);
}
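
/*
 * Truncate the block tree to the current i_size: free the tail of the
 * direct slots, then the partial branch found by find_shared(), then
 * any remaining whole indirect subtrees.
 */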
static inline void truncate (struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	iblock = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}
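
/*
 * Worst-case number of device blocks, including indirect blocks,
 * needed to back a file of the given size.
 */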
static inline unsigned nblocks(loff_t size, struct super_block *sb)
{
	int k = sb->s_blocksize_bits - 10;
	unsigned blocks, res, direct = DIRECT, i = DEPTH;
	blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
	res = blocks;
	while (--i && blocks > direct) {
		blocks -= direct;
		blocks += sb->s_blocksize/sizeof(block_t) - 1;
		blocks /= sb->s_blocksize/sizeof(block_t);
		res += blocks;
		direct = 1;
	}
	return res;
}