/*	$NetBSD: chfs_build.c,v 1.2 2011/11/24 21:22:39 agc Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *                    University of Szeged, Hungary
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "chfs.h"
//#include </root/xipffs/netbsd.chfs/chfs.h>

void
chfs_calc_trigger_levels(struct chfs_mount *chmp)
{
    uint32_t size;

    chmp->chm_resv_blocks_deletion = 2;

    size = chmp->chm_ebh->flash_size / 50; // 2% of flash size
    size += chmp->chm_ebh->peb_nr * 100;
    size += chmp->chm_ebh->eb_size - 1;

    chmp->chm_resv_blocks_write =
        chmp->chm_resv_blocks_deletion + (size / chmp->chm_ebh->eb_size);
    chmp->chm_resv_blocks_gctrigger = chmp->chm_resv_blocks_write + 1;
    chmp->chm_resv_blocks_gcmerge = chmp->chm_resv_blocks_deletion + 1;
    chmp->chm_vdirty_blocks_gctrigger = chmp->chm_resv_blocks_gctrigger * 10;

    chmp->chm_nospc_dirty =
        chmp->chm_ebh->eb_size + (chmp->chm_ebh->flash_size / 100);
}
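
/*
 * Worked example of the sizing above (illustration only; the geometry is
 * hypothetical, not taken from this source).  With a 64 MiB flash made up
 * of 512 eraseblocks of 128 KiB each:
 *
 *    size = 67108864/50 + 512*100 + (131072 - 1) = 1524448 bytes
 *
 * so chm_resv_blocks_write       = 2 + 1524448/131072 = 13,
 *    chm_resv_blocks_gctrigger   = 14,
 *    chm_resv_blocks_gcmerge     = 3,
 *    chm_vdirty_blocks_gctrigger = 140,
 *    chm_nospc_dirty             = 131072 + 67108864/100 = 802160 bytes.
 */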

/**
 * chfs_build_set_vnodecache_nlink - set pvno and nlink in vnodecaches
 * @chmp: CHFS main descriptor structure
 * @vc: vnode cache
 * This function traverses @vc's directory entries and sets the pvno and
 * nlink attributes of the vnode that each dirent's vno points to.
 */
void
chfs_build_set_vnodecache_nlink(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc)
{
    struct chfs_dirent *fd;
    //dbg("set nlink\n");

//  for (fd = vc->scan_dirents; fd; fd = fd->next) {
    TAILQ_FOREACH(fd, &vc->scan_dirents, fds) {
        struct chfs_vnode_cache *child_vc;

        if (!fd->vno)
            continue;

        mutex_enter(&chmp->chm_lock_vnocache);
        child_vc = chfs_vnode_cache_get(chmp, fd->vno);
        mutex_exit(&chmp->chm_lock_vnocache);
        if (!child_vc) {
            chfs_mark_node_obsolete(chmp, fd->nref);
            continue;
        }
        if (fd->type == VDIR) {
            if (child_vc->nlink < 1)
                child_vc->nlink = 1;

            if (child_vc->pvno) {
                chfs_err("found a hard link: child dir: %s"
                    ", (vno: %llu) of dir vno: %llu\n",
                    fd->name, (unsigned long long)fd->vno,
                    (unsigned long long)vc->vno);
            } else {
                //dbg("child_vc->pvno =
                //    vc->vno; pvno = %d\n", child_vc->pvno);
                child_vc->pvno = vc->vno;
            }
        }
        child_vc->nlink++;
        //dbg("child_vc->nlink++;\n");
        //child_vc->nlink++;
        vc->nlink++;
    }
}
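
/*
 * Illustration of the nlink accounting above (example tree, not from the
 * original source): a subdirectory that appears in exactly one dirent ends
 * up with nlink = 2 (the "at least 1" base for directories plus one
 * increment per dirent), a regular file referenced once ends up with
 * nlink = 1, and @vc itself gains one link per directory entry it holds.
 * Vnode caches that no surviving dirent points to keep nlink = 0 and are
 * collected later by Step 3 of chfs_build_filesystem().
 */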

/**
 * chfs_build_remove_unlinked_vnode
 */
/* static */
void
chfs_build_remove_unlinked_vnode(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc,
//  struct chfs_dirent **unlinked)
    struct chfs_dirent_list *unlinked)
{
    struct chfs_node_ref *nref;
    struct chfs_dirent *fd, *tmpfd;

    dbg("START\n");
    dbg("vno: %llu\n", (unsigned long long)vc->vno);
    nref = vc->dnode;
    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
    // The vnode cache is at the end of the data node's chain
    while (nref != (struct chfs_node_ref *)vc) {
        struct chfs_node_ref *next = nref->nref_next;
        dbg("mark dnode\n");
        chfs_mark_node_obsolete(chmp, nref);
        nref = next;
    }
    nref = vc->dirents;
    // The vnode cache is at the end of the dirent node's chain
    while (nref != (struct chfs_node_ref *)vc) {
        struct chfs_node_ref *next = nref->nref_next;
        dbg("mark dirent\n");
        chfs_mark_node_obsolete(chmp, nref);
        nref = next;
    }
    if (!TAILQ_EMPTY(&vc->scan_dirents)) {
        TAILQ_FOREACH_SAFE(fd, &vc->scan_dirents, fds, tmpfd) {
//      while (vc->scan_dirents) {
            struct chfs_vnode_cache *child_vc;
//          fd = vc->scan_dirents;
            dbg("dirent dump:\n");
            dbg(" ->vno:     %llu\n", (unsigned long long)fd->vno);
            dbg(" ->version: %llu\n", (unsigned long long)fd->version);
            dbg(" ->nhash:   0x%x\n", fd->nhash);
            dbg(" ->nsize:   %d\n", fd->nsize);
            dbg(" ->name:    %s\n", fd->name);
            dbg(" ->type:    %d\n", fd->type);
//          vc->scan_dirents = fd->next;
            TAILQ_REMOVE(&vc->scan_dirents, fd, fds);

            if (!fd->vno) {
                chfs_free_dirent(fd);
                continue;
            }
            mutex_enter(&chmp->chm_lock_vnocache);
            child_vc = chfs_vnode_cache_get(chmp, fd->vno);
            mutex_exit(&chmp->chm_lock_vnocache);
            if (!child_vc) {
                chfs_free_dirent(fd);
                continue;
            }
            /*
             * Decrease nlink in the child. If it drops to 0, add the
             * dirent to the unlinked list; otherwise just free it.
             */
            child_vc->nlink--;

            if (!child_vc->nlink) {
                //dbg("nlink is 0\n");
//              fd->next = *unlinked;
//              *unlinked = fd;
                // XXX HEAD or TAIL?
                // original code did HEAD, but we could add
                // it to the TAIL easily with TAILQ.
                TAILQ_INSERT_TAIL(unlinked, fd, fds);
            } else {
                chfs_free_dirent(fd);
            }
        }
    } else {
        dbg("there are no scan dirents\n");
    }

    nref = vc->v;
    while ((struct chfs_vnode_cache *)nref != vc) {
        if (!CHFS_REF_OBSOLETE(nref))
            chfs_mark_node_obsolete(chmp, nref);
        nref = nref->nref_next;
    }

    mutex_enter(&chmp->chm_lock_vnocache);
    if (vc->vno != CHFS_ROOTINO)
        chfs_vnode_cache_set_state(chmp, vc, VNO_STATE_UNCHECKED);
    mutex_exit(&chmp->chm_lock_vnocache);
    dbg("END\n");
}

/**
 * chfs_build_filesystem - build the in-memory representation of the filesystem
 * @chmp: super block information
 *
 * Step 1:
 * Scans through the eraseblocks mapped in the EBH. During the scan it builds
 * up the map of vnodes and directory entries and puts them into the
 * vnode_cache.
 * Step 2:
 * Scans the directory tree and sets nlink in the vnode caches.
 * Step 3:
 * Scans the vnode caches with nlink = 0 and removes their nodes.
 */
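/*
 * A rough sketch of the expected use at mount time (the call site is not
 * part of this file, so treat the surrounding code as an assumption):
 *
 *    chfs_calc_trigger_levels(chmp);
 *    err = chfs_build_filesystem(chmp);
 *    if (err)
 *        goto fail;
 *
 * The whole build runs under chm_lock_mountfields; on success the scanned
 * eraseblocks have been classified onto the mount's block queues and
 * chm_nextblock (if any) points at the block the next write should go to.
 */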
int
chfs_build_filesystem(struct chfs_mount *chmp)
{
    int i, err = 0;
    struct chfs_vnode_cache *vc;
    struct chfs_dirent *fd, *tmpfd;
//  struct chfs_dirent *unlinked = NULL;
    struct chfs_node_ref **nref;
    struct chfs_dirent_list unlinked;
    struct chfs_vnode_cache *notregvc;

    TAILQ_INIT(&unlinked);

    mutex_enter(&chmp->chm_lock_mountfields);

    /**
     * Step 1
     */
    chmp->chm_flags |= CHFS_MP_FLAG_SCANNING;
    for (i = 0; i < chmp->chm_ebh->peb_nr; i++) {
        //dbg("processing block: %d\n", i);
        chmp->chm_blocks[i].lnr = i;
        chmp->chm_blocks[i].free_size = chmp->chm_ebh->eb_size;
        // If the LEB is unmapped, add it to the free list and skip it.
        if (chmp->chm_ebh->lmap[i] < 0) {
            //dbg("block %d is unmapped\n", i);
            TAILQ_INSERT_TAIL(&chmp->chm_free_queue,
                &chmp->chm_blocks[i], queue);
            chmp->chm_nr_free_blocks++;
            continue;
        }

        err = chfs_scan_eraseblock(chmp, &chmp->chm_blocks[i]);
        switch (err) {
        case CHFS_BLK_STATE_FREE:
            chmp->chm_nr_free_blocks++;
            TAILQ_INSERT_TAIL(&chmp->chm_free_queue,
                &chmp->chm_blocks[i], queue);
            break;
        case CHFS_BLK_STATE_CLEAN:
            TAILQ_INSERT_TAIL(&chmp->chm_clean_queue,
                &chmp->chm_blocks[i], queue);
            break;
        case CHFS_BLK_STATE_PARTDIRTY:
            //dbg("free size: %d\n", chmp->chm_blocks[i].free_size);
            if (chmp->chm_blocks[i].free_size > chmp->chm_wbuf_pagesize &&
                (!chmp->chm_nextblock ||
                chmp->chm_blocks[i].free_size >
                chmp->chm_nextblock->free_size)) {
                /* convert the old nextblock's free size to
                 * dirty and put it on a list */
                if (chmp->chm_nextblock) {
                    err = chfs_close_eraseblock(chmp,
                        chmp->chm_nextblock);
                    if (err)
                        return err;
                }
                chmp->chm_nextblock = &chmp->chm_blocks[i];
            } else {
                /* convert the scanned block's free size to
                 * dirty and put it on a list */
                err = chfs_close_eraseblock(chmp,
                    &chmp->chm_blocks[i]);
                if (err)
                    return err;
            }
            break;
        case CHFS_BLK_STATE_ALLDIRTY:
            /*
             * The block has a valid EBH header, but it doesn't
             * contain any valid data.
             */
            TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
                &chmp->chm_blocks[i], queue);
            chmp->chm_nr_erasable_blocks++;
            break;
        default:
            /* It was an error, unknown state */
            break;
        }
    }
    chmp->chm_flags &= ~CHFS_MP_FLAG_SCANNING;

    //TODO need bad block check (and bad block handling in EBH too!!)
    /* Currently EBH only checks whether a block is bad during its scan
     * operation. Checks are also needed at erase + write + read.
     */

    /**
     * Step 2
     */
    chmp->chm_flags |= CHFS_MP_FLAG_BUILDING;
    for (i = 0; i < VNODECACHE_SIZE; i++) {
        vc = chmp->chm_vnocache_hash[i];
        while (vc) {
            dbg("vc->vno: %llu\n", (unsigned long long)vc->vno);
            if (!TAILQ_EMPTY(&vc->scan_dirents))
                chfs_build_set_vnodecache_nlink(chmp, vc);
            vc = vc->next;
        }
    }

    /**
     * Step 3
     * Scan for vnodes with 0 nlink.
     */
    for (i = 0; i < VNODECACHE_SIZE; i++) {
        vc = chmp->chm_vnocache_hash[i];
        while (vc) {
            if (vc->nlink) {
                vc = vc->next;
                continue;
            }

            //dbg("remove unlinked start i: %d\n", i);
            chfs_build_remove_unlinked_vnode(chmp,
                vc, &unlinked);
            //dbg("remove unlinked end\n");
            vc = vc->next;
        }
    }
    /* Remove the newly unlinked vnodes. They are on the unlinked list. */
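    /*
     * XXX Note that chfs_build_remove_unlinked_vnode() may append further
     * dirents to the tail of "unlinked" while this loop runs.  Entries
     * appended while the current element still has successors are picked
     * up, but TAILQ_FOREACH_SAFE latches the next pointer before the loop
     * body runs, so entries appended while processing the last element are
     * not revisited here.
     */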
    TAILQ_FOREACH_SAFE(fd, &unlinked, fds, tmpfd) {
//  while (unlinked) {
//      fd = unlinked;
//      unlinked = fd->next;
        TAILQ_REMOVE(&unlinked, fd, fds);
        mutex_enter(&chmp->chm_lock_vnocache);
        vc = chfs_vnode_cache_get(chmp, fd->vno);
        mutex_exit(&chmp->chm_lock_vnocache);
        if (vc) {
            chfs_build_remove_unlinked_vnode(chmp,
                vc, &unlinked);
        }
        chfs_free_dirent(fd);
    }

    chmp->chm_flags &= ~CHFS_MP_FLAG_BUILDING;

    /* Free all dirents. */
    for (i = 0; i < VNODECACHE_SIZE; i++) {
        vc = chmp->chm_vnocache_hash[i];
        while (vc) {
            TAILQ_FOREACH_SAFE(fd, &vc->scan_dirents, fds, tmpfd) {
//          while (vc->scan_dirents) {
//              fd = vc->scan_dirents;
//              vc->scan_dirents = fd->next;
                TAILQ_REMOVE(&vc->scan_dirents, fd, fds);
                if (fd->vno == 0) {
                    //for (nref = &vc->dirents;
                    //    *nref != fd->nref;
                    //    nref = &((*nref)->next));

                    nref = &fd->nref;
                    *nref = fd->nref->nref_next;
                    //fd->nref->nref_next = NULL;
                } else if (fd->type == VDIR) {
                    // set the state of every non-VREG file's vnode cache
                    mutex_enter(&chmp->chm_lock_vnocache);
                    notregvc =
                        chfs_vnode_cache_get(chmp,
                            fd->vno);
                    chfs_vnode_cache_set_state(chmp,
                        notregvc, VNO_STATE_PRESENT);
                    mutex_exit(&chmp->chm_lock_vnocache);
                }
                chfs_free_dirent(fd);
            }
//          vc->scan_dirents = NULL;
            KASSERT(TAILQ_EMPTY(&vc->scan_dirents));
            vc = vc->next;
        }
    }

    // Set up chmp->chm_wbuf_ofs for the first write
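    /*
     * If a partially written block was selected above, the write buffer
     * offset starts right after its used area; otherwise 0xffffffff is
     * stored, which the write path presumably treats as "no block chosen
     * yet" (an interpretation, not stated in this file).
     */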
    if (chmp->chm_nextblock) {
        dbg("free_size: %d\n", chmp->chm_nextblock->free_size);
        chmp->chm_wbuf_ofs = chmp->chm_ebh->eb_size -
            chmp->chm_nextblock->free_size;
    } else {
        chmp->chm_wbuf_ofs = 0xffffffff;
    }
    mutex_exit(&chmp->chm_lock_mountfields);

    return 0;
}