/*	$NetBSD: chfs_erase.c,v 1.1 2011/11/24 15:51:31 ahoka Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (c) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * chfs_erase.c
 *
 * Copyright (C) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>,
 *                    ...
 *                    University of Szeged, Hungary
 */

#include "chfs.h"
/**
 * chfs_remap_leb - unmap and then map a leb
 * @chmp: chfs mount structure
 *
 * This function gets an eraseblock from the erasable queue, unmaps it through
 * EBH and maps another eraseblock to the same LNR.
 * EBH will find a free eraseblock if there is one, or will erase a dirty
 * block if there is no free one.
 *
 * Returns zero in case of success, an error code otherwise.
 *
 * Needs more brainstorming here.
 */
int
chfs_remap_leb(struct chfs_mount *chmp)
{
	int err;
	struct chfs_eraseblock *cheb;
	uint32_t dirty, unchecked, used, free, wasted;
	dbg("chfs_remap_leb\n");

	//dbg("chmp->chm_nr_erasable_blocks: %d\n", chmp->chm_nr_erasable_blocks);
	//dbg("ltree: %p ecl: %p\n", &chmp->chm_ebh->ltree_lock, &chmp->chm_lock_sizes);
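	/*
	 * Locking: the caller must hold chm_lock_mountfields and
	 * chm_lock_sizes, and must not have the write buffer lock
	 * write-held (asserted below).
	 */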
	KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_sizes));
	if (!chmp->chm_nr_erasable_blocks) {
		//TODO
		/* We don't have any erasable blocks; check whether there are
		 * blocks on erasable_pending_wbuf_queue, flush the data and
		 * then we can remap one of them.
		 * If that list is empty too, we need to GC?
		 */
		if (!TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue)) {
			cheb = TAILQ_FIRST(&chmp->chm_erasable_pending_wbuf_queue);
			TAILQ_REMOVE(&chmp->chm_erasable_pending_wbuf_queue, cheb, queue);
			if (chmp->chm_wbuf_len) {
				mutex_exit(&chmp->chm_lock_sizes);
				chfs_flush_pending_wbuf(chmp);
				mutex_enter(&chmp->chm_lock_sizes);
			}
			TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue, cheb, queue);
			chmp->chm_nr_erasable_blocks++;
		} else {
			/* We can't delete any block. */
			//FIXME should we return ENOSPC?
			return ENOSPC;
		}
	}

	cheb = TAILQ_FIRST(&chmp->chm_erase_pending_queue);
	TAILQ_REMOVE(&chmp->chm_erase_pending_queue, cheb, queue);
	chmp->chm_nr_erasable_blocks--;
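	/*
	 * Remember the block's per-type sizes so the accounting can be
	 * rolled back to a clean, fully-free state after the remap.
	 */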
	dirty = cheb->dirty_size;
	unchecked = cheb->unchecked_size;
	used = cheb->used_size;
	free = cheb->free_size;
	wasted = cheb->wasted_size;

	// Free allocated node references for this eraseblock
	chfs_free_node_refs(cheb);

	err = chfs_unmap_leb(chmp, cheb->lnr);
	if (err)
		return err;

	err = chfs_map_leb(chmp, cheb->lnr);
	if (err)
		return err;

	// Reset state to default and change chmp sizes too
	chfs_change_size_dirty(chmp, cheb, -dirty);
	chfs_change_size_unchecked(chmp, cheb, -unchecked);
	chfs_change_size_used(chmp, cheb, -used);
	chfs_change_size_free(chmp, cheb, chmp->chm_ebh->eb_size - free);
	chfs_change_size_wasted(chmp, cheb, -wasted);

	KASSERT(cheb->dirty_size == 0);
	KASSERT(cheb->unchecked_size == 0);
	KASSERT(cheb->used_size == 0);
	KASSERT(cheb->free_size == chmp->chm_ebh->eb_size);
	KASSERT(cheb->wasted_size == 0);
	cheb->first_node = NULL;
	cheb->last_node = NULL;
	// put it back on the free queue
	TAILQ_INSERT_TAIL(&chmp->chm_free_queue, cheb, queue);
	chmp->chm_nr_free_blocks++;
	dbg("remapped (free: %d, erasable: %d)\n", chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks);
	KASSERT(!TAILQ_EMPTY(&chmp->chm_free_queue));

	return 0;
}
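
/*
 * Illustrative sketch (not part of the original source): one way a caller
 * could satisfy the locking protocol asserted at the top of
 * chfs_remap_leb(). The helper name chfs_remap_leb_example() is
 * hypothetical; real CHFS code takes these locks as part of larger
 * operations rather than for a single remap.
 */
#if 0
static int
chfs_remap_leb_example(struct chfs_mount *chmp)
{
	int err;

	/* chm_lock_wbuf must not be write-held around this call. */
	mutex_enter(&chmp->chm_lock_mountfields);
	mutex_enter(&chmp->chm_lock_sizes);

	err = chfs_remap_leb(chmp);

	mutex_exit(&chmp->chm_lock_sizes);
	mutex_exit(&chmp->chm_lock_mountfields);
	return err;
}
#endif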