/*	$NetBSD: chfs_scan.c,v 1.2 2011/11/24 21:09:37 agc Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (c) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * chfs_scan.c
 *
 * Created on: 2009.11.05.
 *     Author: dtengeri
 */

#include "chfs.h"
/**
 * chfs_scan_make_vnode_cache - makes a new vnode cache during scan
 * @chmp: CHFS main descriptor structure
 * @vno: vnode identifier
 * This function returns a vnode cache belonging to @vno.
 */
struct chfs_vnode_cache *
chfs_scan_make_vnode_cache(struct chfs_mount *chmp, ino_t vno)
{
	struct chfs_vnode_cache *vc;

	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	vc = chfs_vnode_cache_get(chmp, vno);
	if (vc) {
		return vc;
	}

	if (vno > chmp->chm_max_vno) {
		chmp->chm_max_vno = vno;
	}

	vc = chfs_vnode_cache_alloc(vno);

	//mutex_enter(&chmp->chm_lock_vnocache);
	chfs_vnode_cache_add(chmp, vc);
	//mutex_exit(&chmp->chm_lock_vnocache);

	if (vno == CHFS_ROOTINO) {
		vc->nlink = 2;
		vc->pvno = CHFS_ROOTINO;
		chfs_vnode_cache_set_state(chmp,
		    vc, VNO_STATE_CHECKEDABSENT);
	}

	return vc;
}
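
/*
 * Usage sketch (minimal, mirroring chfs_scan_check_dirent_node() below):
 * the caller must hold chm_lock_vnocache around the call, as the KASSERT
 * above requires.
 *
 *	mutex_enter(&chmp->chm_lock_vnocache);
 *	vc = chfs_scan_make_vnode_cache(chmp, le64toh(dirent->pvno));
 *	mutex_exit(&chmp->chm_lock_vnocache);
 *	if (!vc)
 *		return ENOMEM;
 */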
/**
 * chfs_scan_check_node_hdr - checks node magic and crc
 * @nhdr: node header to check
 * Returns 0 if everything is OK, error code otherwise.
 */
int
chfs_scan_check_node_hdr(struct chfs_flash_node_hdr *nhdr)
{
	uint16_t magic;
	uint32_t crc, hdr_crc;

	magic = le16toh(nhdr->magic);

	if (magic != CHFS_FS_MAGIC_BITMASK) {
		dbg("bad magic\n");
		return CHFS_NODE_BADMAGIC;
	}

	hdr_crc = le32toh(nhdr->hdr_crc);
	crc = crc32(0, (uint8_t *)nhdr, CHFS_NODE_HDR_SIZE - 4);

	if (crc != hdr_crc) {
		dbg("bad crc\n");
		return CHFS_NODE_BADCRC;
	}

	return CHFS_NODE_OK;
}
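
/*
 * Usage sketch (minimal): the scan loop in chfs_scan_eraseblock() below
 * uses this check to decide whether the bytes at the current offset start
 * a valid node.  Note that the CRC covers the header up to, but not
 * including, its last 4 bytes (CHFS_NODE_HDR_SIZE - 4).
 *
 *	err = chfs_scan_check_node_hdr(nhdr);
 *	if (err) {
 *		chfs_update_eb_dirty(chmp, cheb, 4);
 *		ofs += 4;
 *		continue;
 *	}
 */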
/**
 * chfs_scan_check_vnode - check vnode crc and add to vnode cache
 * @chmp: CHFS main descriptor structure
 * @cheb: eraseblock information
 * @buf: vnode to check
 * @ofs: offset in eraseblock where vnode starts
 */
int
chfs_scan_check_vnode(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, void *buf, off_t ofs)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	struct chfs_vnode_cache *vc;
	struct chfs_flash_vnode *vnode = buf;
	struct chfs_node_ref *nref;
	int err;
	uint32_t crc;
	ino_t vno;

	crc = crc32(0, (uint8_t *)vnode,
	    sizeof(struct chfs_flash_vnode) - 4);

	if (crc != le32toh(vnode->node_crc)) {
		err = chfs_update_eb_dirty(chmp,
		    cheb, le32toh(vnode->length));
		if (err) {
			return err;
		}

		return CHFS_NODE_BADCRC;
	}

	vno = le64toh(vnode->vno);

	mutex_enter(&chmp->chm_lock_vnocache);
	vc = chfs_vnode_cache_get(chmp, vno);
	if (!vc) {
		vc = chfs_scan_make_vnode_cache(chmp, vno);
		if (!vc) {
			mutex_exit(&chmp->chm_lock_vnocache);
			return ENOMEM;
		}
	}
	mutex_exit(&chmp->chm_lock_vnocache);

	nref = chfs_alloc_node_ref(cheb);

	nref->nref_offset = ofs;

	KASSERT(nref->nref_lnr == cheb->lnr);

	/* Check version of vnode. */
	if ((struct chfs_vnode_cache *)vc->v != vc) {
		if (le64toh(vnode->version) > *vc->vno_version) {
			//err = chfs_update_eb_dirty(chmp, &chmp->chm_blocks[vc->v->lnr],
			//    sizeof(struct chfs_flash_vnode));
			*vc->vno_version = le64toh(vnode->version);
			chfs_add_vnode_ref_to_vc(chmp, vc, nref);
		} else {
			err = chfs_update_eb_dirty(chmp, cheb,
			    sizeof(struct chfs_flash_vnode));
			return CHFS_NODE_OK;
		}
	} else {
		vc->vno_version = kmem_alloc(sizeof(uint64_t), KM_SLEEP);
		if (!vc->vno_version)
			return ENOMEM;
		*vc->vno_version = le64toh(vnode->version);
		chfs_add_vnode_ref_to_vc(chmp, vc, nref);
	}

	mutex_enter(&chmp->chm_lock_sizes);
	//dbg("B:lnr: %d |free_size: %d node's size: %d\n", cheb->lnr, cheb->free_size, le32toh(vnode->length));
	chfs_change_size_free(chmp, cheb, -le32toh(vnode->length));
	chfs_change_size_used(chmp, cheb, le32toh(vnode->length));
	mutex_exit(&chmp->chm_lock_sizes);

	KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);

	//dbg(" A: free_size: %d\n", cheb->free_size);

	/*dbg("vnode dump:\n");
	dbg(" ->magic: 0x%x\n", le16toh(vnode->magic));
	dbg(" ->type: %d\n", le16toh(vnode->type));
	dbg(" ->length: %d\n", le32toh(vnode->length));
	dbg(" ->hdr_crc: 0x%x\n", le32toh(vnode->hdr_crc));
	dbg(" ->vno: %d\n", le64toh(vnode->vno));
	dbg(" ->version: %ld\n", le64toh(vnode->version));
	dbg(" ->uid: %d\n", le16toh(vnode->uid));
	dbg(" ->gid: %d\n", le16toh(vnode->gid));
	dbg(" ->mode: %d\n", le32toh(vnode->mode));
	dbg(" ->dn_size: %d\n", le32toh(vnode->dn_size));
	dbg(" ->atime: %d\n", le32toh(vnode->atime));
	dbg(" ->mtime: %d\n", le32toh(vnode->mtime));
	dbg(" ->ctime: %d\n", le32toh(vnode->ctime));
	dbg(" ->dsize: %d\n", le32toh(vnode->dsize));
	dbg(" ->node_crc: 0x%x\n", le32toh(vnode->node_crc));*/

	return CHFS_NODE_OK;
}
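
/**
 * chfs_scan_mark_dirent_obsolete - remove a dirent's node ref from the
 * vnode cache
 * @chmp: CHFS main descriptor structure
 * @vc: vnode cache the directory entry belongs to
 * @fd: directory entry whose node reference is dropped
 * Only the nref chain of @vc is updated here; moving the entry's space
 * from used to dirty is left to the caller (see chfs_add_fd_to_list()).
 */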
int
chfs_scan_mark_dirent_obsolete(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc, struct chfs_dirent *fd)
{
	//int size;
	struct chfs_eraseblock *cheb;
	struct chfs_node_ref *prev, *nref;

	nref = fd->nref;
	cheb = &chmp->chm_blocks[fd->nref->nref_lnr];

	/* Remove dirent's node ref from vnode cache */
	prev = vc->dirents;
	if (prev && prev == nref) {
		vc->dirents = prev->nref_next;
	} else if (prev && prev != (void *)vc) {
		while (prev->nref_next && prev->nref_next !=
		    (void *)vc && prev->nref_next != nref) {
			prev = prev->nref_next;
		}

		if (prev->nref_next == nref) {
			prev->nref_next = nref->nref_next;
		}
	}

	/*dbg("XXX - start\n");
	//nref = vc->dirents;
	struct chfs_dirent *tmp;
	tmp = vc->scan_dirents;
	while (tmp) {
		dbg(" ->tmp->name: %s\n", tmp->name);
		dbg(" ->tmp->version: %ld\n", tmp->version);
		dbg(" ->tmp->vno: %d\n", tmp->vno);
		tmp = tmp->next;
	}
	dbg("XXX - end\n");*/
	//size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) + fd->nsize);

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);

	return 0;
}
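
/**
 * chfs_add_fd_to_list - add a scanned directory entry to its parent
 * @chmp: CHFS main descriptor structure
 * @new: directory entry to insert
 * @pvc: vnode cache of the parent directory
 * The scan_dirents list is kept ordered by name hash.  If an entry with
 * the same name already exists, only the one with the higher version is
 * kept; the other is marked obsolete and its space is accounted as dirty.
 */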
void
chfs_add_fd_to_list(struct chfs_mount *chmp,
    struct chfs_dirent *new, struct chfs_vnode_cache *pvc)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	int size;
	struct chfs_eraseblock *cheb, *oldcheb;
	// struct chfs_dirent **prev;
	struct chfs_dirent *fd, *tmpfd;

	dbg("adding fd to list: %s\n", new->name);

	if (new->version > pvc->highest_version)
		pvc->highest_version = new->version;

	size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) +
	    new->nsize);
	cheb = &chmp->chm_blocks[new->nref->nref_lnr];

	mutex_enter(&chmp->chm_lock_sizes);
	TAILQ_FOREACH_SAFE(fd, &pvc->scan_dirents, fds, tmpfd) {
		if (fd->nhash > new->nhash) {
			/* insert new before fd */
			TAILQ_INSERT_BEFORE(fd, new, fds);
			goto out;
		} else if (fd->nhash == new->nhash &&
		    !strcmp(fd->name, new->name)) {
			if (new->version > fd->version) {
				// new->next = fd->next;
				/* replace fd with new */
				TAILQ_INSERT_BEFORE(fd, new, fds);
				chfs_change_size_free(chmp, cheb, -size);
				chfs_change_size_used(chmp, cheb, size);

				TAILQ_REMOVE(&pvc->scan_dirents, fd, fds);
				if (fd->nref) {
					size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) + fd->nsize);
					chfs_scan_mark_dirent_obsolete(chmp, pvc, fd);
					oldcheb = &chmp->chm_blocks[fd->nref->nref_lnr];
					chfs_change_size_used(chmp, oldcheb, -size);
					chfs_change_size_dirty(chmp, oldcheb, size);
				}
				chfs_free_dirent(fd);
				// *prev = new;//XXX
			} else {
				chfs_scan_mark_dirent_obsolete(chmp, pvc, new);
				chfs_change_size_free(chmp, cheb, -size);
				chfs_change_size_dirty(chmp, cheb, size);
				chfs_free_dirent(new);
			}
			/*dbg("START\n");
			fd = pvc->scan_dirents;
			while (fd) {
				dbg("dirent dump:\n");
				dbg(" ->vno: %d\n", fd->vno);
				dbg(" ->version: %ld\n", fd->version);
				dbg(" ->nhash: 0x%x\n", fd->nhash);
				dbg(" ->nsize: %d\n", fd->nsize);
				dbg(" ->name: %s\n", fd->name);
				dbg(" ->type: %d\n", fd->type);
				fd = fd->next;
			}
			dbg("END\n");*/
			mutex_exit(&chmp->chm_lock_sizes);
			return;
		}
	}
	/* if we couldn't fit it elsewhere, let's add it to the end */
	TAILQ_INSERT_TAIL(&pvc->scan_dirents, new, fds);

out:
	//dbg("B:lnr: %d |free_size: %d size: %d\n", cheb->lnr, cheb->free_size, size);
	chfs_change_size_free(chmp, cheb, -size);
	chfs_change_size_used(chmp, cheb, size);
	mutex_exit(&chmp->chm_lock_sizes);

	KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
	//dbg(" A: free_size: %d\n", cheb->free_size);

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);

	// fd = pvc->scan_dirents;
	/*dbg("START\n");
	while (fd) {
		dbg("dirent dump:\n");
		dbg(" ->vno: %d\n", fd->vno);
		dbg(" ->version: %ld\n", fd->version);
		dbg(" ->nhash: 0x%x\n", fd->nhash);
		dbg(" ->nsize: %d\n", fd->nsize);
		dbg(" ->name: %s\n", fd->name);
		dbg(" ->type: %d\n", fd->type);
		fd = fd->next;
	}
	dbg("END\n");*/
}
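
/*
 * Size accounting above is based on the padded on-flash footprint of a
 * directory entry:
 *
 *	size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) + new->nsize);
 *
 * that is, the fixed dirent header plus the name bytes, rounded up by
 * CHFS_PAD to the alignment defined in chfs.h.  Each insertion moves that
 * many bytes from the eraseblock's free counter to used (or to dirty when
 * the entry loses to a newer version of the same name).
 */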
/**
 * chfs_scan_check_dirent_node - check dirent crc and add to vnode cache
 * @chmp: CHFS main descriptor structure
 * @cheb: eraseblock information
 * @buf: directory entry to check
 * @ofs: offset in eraseblock where dirent starts
 */
int
chfs_scan_check_dirent_node(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, void *buf, off_t ofs)
{
	int err, namelen;
	uint32_t crc;
	struct chfs_dirent *fd;
	struct chfs_vnode_cache *vc;
	struct chfs_flash_dirent_node *dirent = buf;

	//struct chfs_node_ref *tmp;

	crc = crc32(0, (uint8_t *)dirent, sizeof(*dirent) - 4);
	if (crc != le32toh(dirent->node_crc)) {
		err = chfs_update_eb_dirty(chmp, cheb, le32toh(dirent->length));
		if (err)
			return err;

		return CHFS_NODE_BADCRC;
	}

	namelen = dirent->nsize;

	fd = chfs_alloc_dirent(namelen + 1);
	if (!fd)
		return ENOMEM;

	fd->nref = chfs_alloc_node_ref(cheb);
	if (!fd->nref) {
		chfs_free_dirent(fd);
		return ENOMEM;
	}

	KASSERT(fd->nref->nref_lnr == cheb->lnr);

	memcpy(&fd->name, dirent->name, namelen);
	fd->nsize = namelen;
	fd->name[namelen] = 0;
	crc = crc32(0, fd->name, dirent->nsize);
	if (crc != le32toh(dirent->name_crc)) {
		chfs_err("Directory entry's name has bad crc: read: 0x%x, "
		    "calculated: 0x%x\n", le32toh(dirent->name_crc), crc);
		chfs_free_dirent(fd);
		err = chfs_update_eb_dirty(chmp, cheb, le32toh(dirent->length));
		if (err)
			return err;

		return CHFS_NODE_BADNAMECRC;
	}

	/* Check vnode_cache of parent node */
	mutex_enter(&chmp->chm_lock_vnocache);
	vc = chfs_scan_make_vnode_cache(chmp, le64toh(dirent->pvno));
	mutex_exit(&chmp->chm_lock_vnocache);
	if (!vc) {
		chfs_free_dirent(fd);
		return ENOMEM;
	}

	fd->nref->nref_offset = ofs;

	dbg("add dirent to #%llu\n", (unsigned long long)vc->vno);
	chfs_add_node_to_list(chmp, vc, fd->nref, &vc->dirents);
	/*tmp = vc->dirents;
	dbg("START|vno: %d dirents dump\n", vc->vno);
	while (tmp) {
		dbg(" ->nref->nref_lnr: %d\n", tmp->lnr);
		dbg(" ->nref->nref_offset: %d\n", tmp->offset);
		tmp = tmp->next;
	}
	dbg(" END|vno: %d dirents dump\n", vc->vno);*/

	// fd->next = NULL;
	fd->vno = le64toh(dirent->vno);
	fd->version = le64toh(dirent->version);
	fd->nhash = hash32_buf(fd->name, namelen, HASH32_BUF_INIT);
	fd->type = dirent->dtype;

	/*dbg("dirent dump:\n");
	dbg(" ->vno: %d\n", fd->vno);
	dbg(" ->version: %ld\n", fd->version);
	dbg(" ->nhash: 0x%x\n", fd->nhash);
	dbg(" ->nsize: %d\n", fd->nsize);
	dbg(" ->name: %s\n", fd->name);
	dbg(" ->type: %d\n", fd->type);*/

	chfs_add_fd_to_list(chmp, fd, vc);

	/*struct chfs_node_ref *tmp;
	tmp = vc->dirents;
	dbg("START|vno: %d dirents dump\n", vc->vno);
	while (tmp) {
		dbg(" ->nref->nref_lnr: %d\n", tmp->lnr);
		dbg(" ->nref->nref_offset: %d\n", tmp->offset);
		tmp = tmp->next;
	}
	dbg(" END|vno: %d dirents dump\n", vc->vno);*/

	/*dbg("dirent dump:\n");
	dbg(" ->magic: 0x%x\n", le16toh(dirent->magic));
	dbg(" ->type: %d\n", le16toh(dirent->type));
	dbg(" ->length: %d\n", le32toh(dirent->length));
	dbg(" ->hdr_crc: 0x%x\n", le32toh(dirent->hdr_crc));
	dbg(" ->vno: %d\n", le64toh(dirent->vno));
	dbg(" ->pvno: %d\n", le64toh(dirent->pvno));
	dbg(" ->version: %ld\n", le64toh(dirent->version));
	dbg(" ->mctime: %d\n", le32toh(dirent->mctime));
	dbg(" ->nsize: %d\n", dirent->nsize);
	dbg(" ->dtype: %d\n", dirent->dtype);
	dbg(" ->name_crc: 0x%x\n", le32toh(dirent->name_crc));
	dbg(" ->node_crc: 0x%x\n", le32toh(dirent->node_crc));
	dbg(" ->name: %s\n", dirent->name);*/

	return CHFS_NODE_OK;
}
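
/*
 * A scanned dirent is recorded in two places: its raw node reference is
 * linked onto the parent's vc->dirents chain via chfs_add_node_to_list(),
 * while the in-memory chfs_dirent goes onto pvc->scan_dirents via
 * chfs_add_fd_to_list(), where duplicate names are resolved by version.
 */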
/**
 * chfs_scan_check_data_node - check data node crc and add to vnode cache
 * @chmp: CHFS main descriptor structure
 * @cheb: eraseblock information
 * @buf: data node to check
 * @ofs: offset in eraseblock where data node starts
 */
int
chfs_scan_check_data_node(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, void *buf, off_t ofs)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	int err;
	uint32_t crc;
	ino_t vno;
	struct chfs_node_ref *nref;
	struct chfs_vnode_cache *vc;
	struct chfs_flash_data_node *dnode = buf;

	crc = crc32(0, (uint8_t *)dnode, sizeof(struct chfs_flash_data_node) - 4);
	if (crc != le32toh(dnode->node_crc)) {
		err = chfs_update_eb_dirty(chmp, cheb, le32toh(dnode->length));
		if (err)
			return err;

		return CHFS_NODE_BADCRC;
	}
	/*
	 * Don't check the data node's crc and version here; it will be
	 * done by the background GC thread.
	 */
	nref = chfs_alloc_node_ref(cheb);
	if (!nref)
		return ENOMEM;

	nref->nref_offset = ofs | CHFS_UNCHECKED_NODE_MASK;

	KASSERT(nref->nref_lnr == cheb->lnr);

	vno = le64toh(dnode->vno);
	mutex_enter(&chmp->chm_lock_vnocache);
	vc = chfs_vnode_cache_get(chmp, vno);
	if (!vc) {
		vc = chfs_scan_make_vnode_cache(chmp, vno);
		if (!vc) {
			mutex_exit(&chmp->chm_lock_vnocache);
			return ENOMEM;
		}
	}
	mutex_exit(&chmp->chm_lock_vnocache);
	chfs_add_node_to_list(chmp, vc, nref, &vc->dnode);

	dbg("chmpfree: %u, chebfree: %u, dnode: %u\n",
	    chmp->chm_free_size, cheb->free_size, le32toh(dnode->length));

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_free(chmp, cheb, -le32toh(dnode->length));
	chfs_change_size_unchecked(chmp, cheb, le32toh(dnode->length));
	mutex_exit(&chmp->chm_lock_sizes);
	return CHFS_NODE_OK;
}
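
/*
 * Note: only the node's own crc is verified here.  The reference is stored
 * with CHFS_UNCHECKED_NODE_MASK set and the node's length is moved from the
 * free to the unchecked counter; checking the data itself is deferred to
 * the background GC thread.
 */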
/**
 * chfs_scan_classify_cheb - determine eraseblock's state
 * @chmp: CHFS main descriptor structure
 * @cheb: eraseblock to classify
 */
int
chfs_scan_classify_cheb(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	if (cheb->free_size == chmp->chm_ebh->eb_size)
		return CHFS_BLK_STATE_FREE;
	else if (cheb->dirty_size < MAX_DIRTY_TO_CLEAN)
		return CHFS_BLK_STATE_CLEAN;
	else if (cheb->used_size || cheb->unchecked_size)
		return CHFS_BLK_STATE_PARTDIRTY;
	else
		return CHFS_BLK_STATE_ALLDIRTY;
}
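
/*
 * Classification summary (follows directly from the checks above):
 *
 *	entirely free space             -> CHFS_BLK_STATE_FREE
 *	dirty_size < MAX_DIRTY_TO_CLEAN -> CHFS_BLK_STATE_CLEAN
 *	some used or unchecked space    -> CHFS_BLK_STATE_PARTDIRTY
 *	otherwise                       -> CHFS_BLK_STATE_ALLDIRTY
 */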
/**
 * chfs_scan_eraseblock - scans an eraseblock, looking for nodes
 * @chmp: CHFS main descriptor structure
 * @cheb: eraseblock to scan
 *
 * This function scans a whole eraseblock, checks the nodes on it and adds
 * them to the vnode cache.
 * Returns the eraseblock state on success, an error code otherwise.
 */
int
chfs_scan_eraseblock(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	int err;
	size_t len, retlen;
	off_t ofs = 0;
	int lnr = cheb->lnr;
	u_char *buf;
	struct chfs_flash_node_hdr *nhdr;
	int read_free = 0;
	struct chfs_node_ref *nref;

	dbg("scanning eraseblock content: %d free_size: %d\n",
	    cheb->lnr, cheb->free_size);
	dbg("scanned physical block: %d\n", chmp->chm_ebh->lmap[lnr]);
	buf = kmem_alloc(CHFS_MAX_NODE_SIZE, KM_SLEEP);

	while ((ofs + CHFS_NODE_HDR_SIZE) < chmp->chm_ebh->eb_size) {
		memset(buf, 0, CHFS_MAX_NODE_SIZE);
		err = chfs_read_leb(chmp,
		    lnr, buf, ofs, CHFS_NODE_HDR_SIZE, &retlen);
		if (err) {
			kmem_free(buf, CHFS_MAX_NODE_SIZE);
			return err;
		}

		if (retlen != CHFS_NODE_HDR_SIZE) {
			chfs_err("Error reading node header: "
			    "read: %zu instead of: %zu\n",
			    retlen, CHFS_NODE_HDR_SIZE);
			kmem_free(buf, CHFS_MAX_NODE_SIZE);
			return EIO;
		}

		/*
		 * First check whether the buffer we read is full of 0xff;
		 * if so, the rest of the block may be free.  Increase
		 * read_free, and once it reaches MAX_READ_FREE stop
		 * reading the block.
		 */
		if (check_pattern(buf, 0xff, 0, CHFS_NODE_HDR_SIZE)) {
			read_free += CHFS_NODE_HDR_SIZE;
			if (read_free >= MAX_READ_FREE(chmp)) {
				dbg("rest of the block is free. Size: %d\n",
				    cheb->free_size);
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return chfs_scan_classify_cheb(chmp, cheb);
			}
			ofs += CHFS_NODE_HDR_SIZE;
			continue;
		} else {
			chfs_update_eb_dirty(chmp, cheb, read_free);
			read_free = 0;
		}

		nhdr = (struct chfs_flash_node_hdr *)buf;

		err = chfs_scan_check_node_hdr(nhdr);
		if (err) {
			dbg("node hdr error\n");
			err = chfs_update_eb_dirty(chmp, cheb, 4);
			if (err) {
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return err;
			}

			ofs += 4;
			continue;
		}

		ofs += CHFS_NODE_HDR_SIZE;
		if (ofs > chmp->chm_ebh->eb_size) {
			chfs_err("Second part of node is on the next eraseblock.\n");
			kmem_free(buf, CHFS_MAX_NODE_SIZE);
			return EIO;
		}
		switch (le16toh(nhdr->type)) {
		case CHFS_NODETYPE_VNODE:
			/* Read up the node */
			//dbg("nodetype vnode\n");
			len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
			err = chfs_read_leb(chmp,
			    lnr, buf + CHFS_NODE_HDR_SIZE,
			    ofs, len, &retlen);
			if (err) {
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return err;
			}

			if (retlen != len) {
				chfs_err("Error reading vnode: read: %zu "
				    "instead of: %zu\n", retlen, len);
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return EIO;
			}

			KASSERT(lnr == cheb->lnr);
			err = chfs_scan_check_vnode(chmp,
			    cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
			if (err) {
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return err;
			}

			//dbg("XXX5end\n");
			break;
		case CHFS_NODETYPE_DIRENT:
			/* Read up the node */
			//dbg("nodetype dirent\n");
			len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;

			err = chfs_read_leb(chmp,
			    lnr, buf + CHFS_NODE_HDR_SIZE,
			    ofs, len, &retlen);
			if (err) {
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return err;
			}

			if (retlen != len) {
				chfs_err("Error reading dirent node: read: %zu "
				    "instead of: %zu\n", retlen, len);
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return EIO;
			}

			KASSERT(lnr == cheb->lnr);

			err = chfs_scan_check_dirent_node(chmp,
			    cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
			if (err) {
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return err;
			}

			//dbg("XXX6end\n");
			break;
		case CHFS_NODETYPE_DATA:
			//dbg("nodetype data\n");
			len = sizeof(struct chfs_flash_data_node) -
			    CHFS_NODE_HDR_SIZE;
			err = chfs_read_leb(chmp,
			    lnr, buf + CHFS_NODE_HDR_SIZE,
			    ofs, len, &retlen);
			if (err) {
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return err;
			}

			if (retlen != len) {
				chfs_err("Error reading data node: read: %zu "
				    "instead of: %zu\n", retlen, len);
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return EIO;
			}
			KASSERT(lnr == cheb->lnr);
			err = chfs_scan_check_data_node(chmp,
			    cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
			if (err) {
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return err;
			}

			//dbg("XXX7end\n");
			break;
		case CHFS_NODETYPE_PADDING:
			//dbg("nodetype padding\n");
			//dbg("padding len: %d\n", le32toh(nhdr->length));
			//dbg("BEF: cheb->free_size: %d\n", cheb->free_size);
			nref = chfs_alloc_node_ref(cheb);
			nref->nref_offset = ofs - CHFS_NODE_HDR_SIZE;
			nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
			    CHFS_OBSOLETE_NODE_MASK;

			err = chfs_update_eb_dirty(chmp, cheb,
			    le32toh(nhdr->length));
			//dbg("AFT: cheb->free_size: %d\n", cheb->free_size);
			if (err) {
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return err;
			}

			//dbg("XXX8end\n");
			break;
		default:
			//dbg("nodetype ? (default)\n");
			/* Unknown node type, update dirty and skip */
			err = chfs_update_eb_dirty(chmp, cheb,
			    le32toh(nhdr->length));
			if (err) {
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return err;
			}

			//dbg("XXX9end\n");
			break;
		}
		ofs += le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
	}

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);

	//dbg("XXX10\n");
	kmem_free(buf, CHFS_MAX_NODE_SIZE);
	return chfs_scan_classify_cheb(chmp, cheb);
}
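
/*
 * Illustrative sketch (not part of this file): a mount-time caller is
 * expected to scan every logical eraseblock and queue it according to the
 * returned state, roughly as follows.  The loop bound and the queueing
 * comments are assumptions; the real caller lives in the CHFS mount/build
 * code, not here.
 *
 *	for (i = 0; i < nr_blocks; i++) {
 *		cheb = &chmp->chm_blocks[i];
 *		ret = chfs_scan_eraseblock(chmp, cheb);
 *		switch (ret) {
 *		case CHFS_BLK_STATE_FREE:
 *			// queue cheb as free
 *			break;
 *		case CHFS_BLK_STATE_CLEAN:
 *			// queue cheb as clean
 *			break;
 *		default:
 *			// dirty states or error handling
 *			break;
 *		}
 *	}
 */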