/*	$NetBSD: chfs_nodeops.c,v 1.3 2012/10/19 12:44:39 ttoth Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "chfs.h"

/*
 * chfs_update_eb_dirty - updates dirty and free space, first and
 *			  last node references
 * Returns zero on success, 1 on failure.
 */
int
chfs_update_eb_dirty(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, uint32_t size)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	if (!size)
		return 0;

	if (size > cheb->free_size) {
		chfs_err("free_size (%d) is less than dirty space (%d) "
		    "on block (%d)\n", cheb->free_size, size, cheb->lnr);
		return 1;
	}

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_free(chmp, cheb, -size);
	chfs_change_size_dirty(chmp, cheb, size);
	mutex_exit(&chmp->chm_lock_sizes);

	return 0;
}
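
/*
 * Illustrative use (a sketch, not part of the original file): closing an
 * eraseblock converts its whole remaining free space to dirty space:
 *
 *	chfs_update_eb_dirty(chmp, cheb, cheb->free_size);
 *
 * which is exactly what chfs_close_eraseblock() does below.
 */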

/*
 * chfs_add_node_to_list - adds a data node ref to vnode cache's dnode list
 * This function inserts a data node ref into the vnode cache's list.
 * The list is sorted by data node's lnr and offset.
 */
void
chfs_add_node_to_list(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc,
    struct chfs_node_ref *new, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_node_ref *nextref = *list;
	struct chfs_node_ref *prevref = NULL;

	/* Find the insertion point: ordered by lnr, then by offset. */
	while (nextref && nextref != (struct chfs_node_ref *)vc &&
	    (nextref->nref_lnr <= new->nref_lnr)) {
		if (nextref->nref_lnr == new->nref_lnr) {
			while (nextref && nextref !=
			    (struct chfs_node_ref *)vc &&
			    (CHFS_GET_OFS(nextref->nref_offset) <
				CHFS_GET_OFS(new->nref_offset))) {
				prevref = nextref;
				nextref = nextref->nref_next;
			}
			break;
		} else {
			prevref = nextref;
			nextref = nextref->nref_next;
		}
	}

	/* If a node already sits at the same lnr and offset, replace it. */
	if (nextref && nextref != (struct chfs_node_ref *)vc &&
	    nextref->nref_lnr == new->nref_lnr &&
	    CHFS_GET_OFS(nextref->nref_offset) ==
	    CHFS_GET_OFS(new->nref_offset)) {
		new->nref_next = nextref->nref_next;
		chfs_mark_node_obsolete(chmp, nextref);
	} else {
		new->nref_next = nextref;
	}

	KASSERT(new->nref_next != NULL);

	if (prevref) {
		prevref->nref_next = new;
	} else {
		*list = new;
	}
}
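
/*
 * Note: these per-vnode lists are not NULL-terminated; the last element
 * points back to the owning vnode cache, which is why the loops above
 * compare against (struct chfs_node_ref *)vc.
 */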

/*
 * chfs_remove_node_from_list - removes a node from a list
 * Usually used for removing data nodes.
 */
void
chfs_remove_node_from_list(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc,
    struct chfs_node_ref *old_nref, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_node_ref *tmpnref;

	if (*list == (struct chfs_node_ref *)vc) {
		/* The list is empty. */
		return;
	}

	KASSERT(old_nref->nref_next != NULL);

	if (*list == old_nref) {
		*list = old_nref->nref_next;
	} else {
		tmpnref = *list;
		while (tmpnref->nref_next &&
		    tmpnref->nref_next != (struct chfs_node_ref *)vc) {
			if (tmpnref->nref_next == old_nref) {
				tmpnref->nref_next = old_nref->nref_next;
				break;
			}
			tmpnref = tmpnref->nref_next;
		}
	}
}

/*
 * chfs_remove_and_obsolete - removes a node from a list and obsoletes the nref
 * We should use this function carefully on data nodes,
 * because removing a frag will also obsolete the node ref.
 */
void
chfs_remove_and_obsolete(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc,
    struct chfs_node_ref *old_nref, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	chfs_remove_node_from_list(chmp, vc, old_nref, list);

	dbg("[MARK] vno: %llu lnr: %u ofs: %u\n", vc->vno, old_nref->nref_lnr,
	    old_nref->nref_offset);
	chfs_mark_node_obsolete(chmp, old_nref);
}
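
/*
 * Typical use: chfs_add_fd_to_inode() below calls this to obsolete the
 * nref of a replaced dirent, and chfs_add_vnode_ref_to_vc() uses it to
 * drop superseded vnode information nodes.
 */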

/* chfs_add_fd_to_inode - adds a directory entry to an inode */
void
chfs_add_fd_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *parent, struct chfs_dirent *new)
{
	struct chfs_dirent *fd, *tmpfd;

	/* update highest version */
	if (new->version > parent->chvc->highest_version) {
		parent->chvc->highest_version = new->version;
	}

	TAILQ_FOREACH_SAFE(fd, &parent->dents, fds, tmpfd) {
		if (fd->nhash > new->nhash) {
			/* insert new before fd */
			TAILQ_INSERT_BEFORE(fd, new, fds);
			return;
		} else if (fd->nhash == new->nhash &&
		    !strcmp(fd->name, new->name)) {
			if (new->version > fd->version) {
				/* replace fd with new */
				TAILQ_INSERT_BEFORE(fd, new, fds);
				TAILQ_REMOVE(&parent->dents, fd, fds);
				if (fd->nref) {
					mutex_enter(&chmp->chm_lock_vnocache);
					chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
					    &parent->chvc->dirents);
					mutex_exit(&chmp->chm_lock_vnocache);
				}
				chfs_free_dirent(fd);
			} else {
				/* new is older (normally it's not an option) */
				chfs_mark_node_obsolete(chmp, new->nref);
				chfs_free_dirent(new);
			}
			return;
		}
	}

	/* if we couldn't fit it elsewhere, let's add it to the end */
	/* FIXME insert tail or insert head? */
	TAILQ_INSERT_HEAD(&parent->dents, new, fds);
}
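
/*
 * Note: the dirent list is kept sorted by name hash; on equal hashes the
 * names are compared with strcmp(), and on a full match the entry with
 * the higher version wins.
 */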

/* chfs_add_vnode_ref_to_vc - adds a vnode info to the vnode cache */
void
chfs_add_vnode_ref_to_vc(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc, struct chfs_node_ref *new)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
	struct chfs_node_ref *nref;

	/* store only the last one, drop the others */
	while (vc->v != (struct chfs_node_ref *)vc) {
		nref = vc->v;
		chfs_remove_and_obsolete(chmp, vc, nref, &vc->v);
	}

	new->nref_next = (struct chfs_node_ref *)vc;
	vc->v = new;
}
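
/*
 * Note: a vnode cache keeps only the newest vnode information node; the
 * loop above obsoletes every older one before the new ref is linked in.
 */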

/* chfs_nref_next - step to the next in-memory nref */
struct chfs_node_ref *
chfs_nref_next(struct chfs_node_ref *nref)
{
	nref++;
	if (nref->nref_lnr == REF_LINK_TO_NEXT) {
		/* end of chain */
		if (!nref->nref_next)
			return NULL;

		/* link to the next block */
		nref = nref->nref_next;
	}
	/* end of chain */
	if (nref->nref_lnr == REF_EMPTY_NODE)
		return NULL;

	return nref;
}
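
/*
 * Note (inferred from the code above): node refs live in arrays, so the
 * common case is a plain increment; REF_LINK_TO_NEXT chains one array to
 * the next and REF_EMPTY_NODE marks the first unused slot.
 */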

/* chfs_nref_len - calculates the length of an nref */
int
chfs_nref_len(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, struct chfs_node_ref *nref)
{
	struct chfs_node_ref *next;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (!cheb)
		cheb = &chmp->chm_blocks[nref->nref_lnr];

	next = chfs_nref_next(nref);

	if (!next) {
		/* The last node on the block runs up to the free space. */
		return chmp->chm_ebh->eb_size - cheb->free_size -
		    CHFS_GET_OFS(nref->nref_offset);
	}
	return CHFS_GET_OFS(next->nref_offset) -
	    CHFS_GET_OFS(nref->nref_offset);
}
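
/*
 * Worked example (illustrative): for two nodes of the same block at
 * offsets 0x100 and 0x180, the first node's length is 0x180 - 0x100 =
 * 0x80; for the last node on a block the end is eb_size - free_size.
 */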

/* chfs_mark_node_obsolete - marks a node as obsolete */
void
chfs_mark_node_obsolete(struct chfs_mount *chmp,
    struct chfs_node_ref *nref)
{
	int len;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	KASSERT(!CHFS_REF_OBSOLETE(nref));

	KASSERT(nref->nref_lnr <= chmp->chm_ebh->peb_nr);
	cheb = &chmp->chm_blocks[nref->nref_lnr];
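
	/* Sanity check: the per-block size counters must add up to eb_size. */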
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		dbg("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}

	len = chfs_nref_len(chmp, cheb, nref);

	mutex_enter(&chmp->chm_lock_sizes);

	/* Move the node's space from used (or unchecked) to dirty. */
	if (CHFS_REF_FLAGS(nref) == CHFS_UNCHECKED_NODE_MASK) {
		chfs_change_size_unchecked(chmp, cheb, -len);
	} else {
		chfs_change_size_used(chmp, cheb, -len);

		KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
	}
	chfs_change_size_dirty(chmp, cheb, len);

	/* Re-check the accounting; a mismatch at this point is fatal. */
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		panic("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}

	nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
	    CHFS_OBSOLETE_NODE_MASK;
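
	/*
	 * The node itself is now marked obsolete in nref_offset; the rest
	 * of the function decides whether its eraseblock must be moved to
	 * another queue.
	 */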
	if (chmp->chm_flags & CHFS_MP_FLAG_SCANNING) {
		/* Scan is in progress, do nothing now. */
		mutex_exit(&chmp->chm_lock_sizes);
		return;
	}

	if (cheb == chmp->chm_nextblock) {
		dbg("Not moving nextblock to dirty/erase_pending list\n");
	} else if (!cheb->used_size && !cheb->unchecked_size) {
		if (cheb == chmp->chm_gcblock) {
			dbg("gcblock is completely dirtied\n");
			chmp->chm_gcblock = NULL;
		} else {
			/* remove from a tailq, but we don't know which tailq contains this cheb
			 * so we remove it from the dirty list now */
			//TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
			int removed = 0;
			struct chfs_eraseblock *eb, *tmpeb;

			TAILQ_FOREACH_SAFE(eb, &chmp->chm_free_queue, queue, tmpeb) {
				if (eb == cheb) {
					TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
					removed = 1;
					break;
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_very_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_very_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_clean_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
		}

		if (chmp->chm_wbuf_len) {
			dbg("Adding block to erasable pending wbuf queue\n");
			TAILQ_INSERT_TAIL(&chmp->chm_erasable_pending_wbuf_queue,
			    cheb, queue);
		} else {
			TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
			    cheb, queue);
			chmp->chm_nr_erasable_blocks++;
		}
		chfs_remap_leb(chmp);
	} else if (cheb == chmp->chm_gcblock) {
		dbg("Not moving gcblock to dirty list\n");
	} else if (cheb->dirty_size > MAX_DIRTY_TO_CLEAN &&
	    cheb->dirty_size - len <= MAX_DIRTY_TO_CLEAN) {
		dbg("Freshly dirtied, remove it from clean queue and "
		    "add it to dirty\n");
		TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size) &&
	    !VERY_DIRTY(chmp, cheb->dirty_size - len)) {
		dbg("Becomes now very dirty, remove it from dirty "
		    "queue and add it to very dirty\n");
		TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		dbg("Leave cheb where it is\n");
	}

	mutex_exit(&chmp->chm_lock_sizes);
}

/*
 * chfs_close_eraseblock - close an eraseblock
 *
 * This function closes the physical chain of the nodes on the eraseblock,
 * converts its free size to dirty and adds it to the clean, dirty or very
 * dirty list.
 */
int
chfs_close_eraseblock(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	uint32_t offset;
	struct chfs_node_ref *nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	offset = chmp->chm_ebh->eb_size - cheb->free_size;

	// Close the chain with a terminating node ref
	nref = chfs_alloc_node_ref(cheb);
	if (!nref)
		return ENOMEM;

	nref->nref_next = NULL;
	nref->nref_offset = offset;

	// Mark space as dirty
	chfs_update_eb_dirty(chmp, cheb, cheb->free_size);

	// Put the block on the appropriate queue
	if (cheb->dirty_size < MAX_DIRTY_TO_CLEAN) {
		TAILQ_INSERT_TAIL(&chmp->chm_clean_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size)) {
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	}

	return 0;
}
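
/*
 * Note: MAX_DIRTY_TO_CLEAN and VERY_DIRTY() above are the same thresholds
 * chfs_mark_node_obsolete() uses when it migrates blocks between the
 * clean, dirty and very dirty queues.
 */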

/*
 * chfs_reserve_space_normal -
 * checks available space and calls chfs_reserve_space;
 * used during writing
 */
int
chfs_reserve_space_normal(struct chfs_mount *chmp, uint32_t size, int prio)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
	while (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks < chmp->chm_resv_blocks_write) {
		dbg("free: %d, erasable: %d, resv: %d\n", chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks, chmp->chm_resv_blocks_write);
		uint32_t avail, dirty;
		if (prio == ALLOC_DELETION && chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks >= chmp->chm_resv_blocks_deletion)
			break;

		dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks * chmp->chm_ebh->eb_size + chmp->chm_unchecked_size;
		if (dirty < chmp->chm_nospc_dirty) {
			dbg("dirty: %u < nospc_dirty: %u\n", dirty, chmp->chm_nospc_dirty);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		avail = chmp->chm_free_size - (chmp->chm_resv_blocks_write * chmp->chm_ebh->eb_size);
		if (size > avail) {
			dbg("size: %u > avail: %u\n", size, avail);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		/* Try to gain space by garbage collecting. */
		mutex_exit(&chmp->chm_lock_sizes);
		ret = chfs_gcollect_pass(chmp);
		mutex_enter(&chmp->chm_lock_sizes);

		if (chmp->chm_nr_erasable_blocks ||
		    !TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue) ||
		    ret == EAGAIN) {
			ret = chfs_remap_leb(chmp);
		}

		if (ret) {
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);
out:
	return ret;
}
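
/*
 * Note: an ALLOC_DELETION request may dig into the reserved blocks down
 * to chm_resv_blocks_deletion, so deletions can still proceed (and free
 * space) when an ordinary write would already fail with ENOSPC.
 */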

/* chfs_reserve_space_gc - tries to reserve space for GC */
int
chfs_reserve_space_gc(struct chfs_mount *chmp, uint32_t size)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_remap_leb(chmp);

	if (size > chmp->chm_free_size) {
		dbg("size: %u\n", size);
		mutex_exit(&chmp->chm_lock_sizes);
		return ENOSPC;
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);

	return ret;
}

/*
 * chfs_reserve_space - finds a block whose free size is >= the requested size
 * Returns zero on success, an error code on failure.
 */
int
chfs_reserve_space(struct chfs_mount *chmp, uint32_t size)
{
	//TODO define minimum reserved blocks, which is needed for writing
	//TODO check we have enough free blocks to write
	//TODO if no: need erase and GC

	int err;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	cheb = chmp->chm_nextblock;
	if (cheb && size > cheb->free_size) {
		dbg("size: %u > free_size: %u\n", size, cheb->free_size);
		/*
		 * There isn't enough space on this eraseblock, so we mark it
		 * dirty and close the physical chain of the node refs.
		 */
		//Write out pending data if any
		if (chmp->chm_wbuf_len) {
			chfs_flush_pending_wbuf(chmp);
			//FIXME need goto restart here?
		}

		while (chmp->chm_wbuf_ofs < chmp->chm_ebh->eb_size) {
			dbg("wbuf ofs: %zu - eb_size: %zu\n",
			    chmp->chm_wbuf_ofs, chmp->chm_ebh->eb_size);
			chfs_flush_pending_wbuf(chmp);
		}

		if (!(chmp->chm_wbuf_ofs % chmp->chm_ebh->eb_size) && !chmp->chm_wbuf_len)
			chmp->chm_wbuf_ofs = 0xffffffff;

		err = chfs_close_eraseblock(chmp, cheb);
		if (err)
			return err;

		cheb = NULL;
	}

	if (!cheb) {
		//get a block for nextblock
		if (TAILQ_EMPTY(&chmp->chm_free_queue)) {
			// If this succeeds there will be a block on free_queue
			dbg("cheb remap (free: %d)\n", chmp->chm_nr_free_blocks);
			err = chfs_remap_leb(chmp);
			if (err)
				return err;
		}

		cheb = TAILQ_FIRST(&chmp->chm_free_queue);
		TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
		chmp->chm_nextblock = cheb;
		chmp->chm_nr_free_blocks--;
	}

	return 0;
}