/* sys/ufs/chfs/chfs_scan.c (NetBSD CHFS, via minix.git mirror) */
1 /* $NetBSD: chfs_scan.c,v 1.6 2015/02/07 04:19:52 christos Exp $ */
3 /*-
4 * Copyright (c) 2010 Department of Software Engineering,
5 * University of Szeged, Hungary
6 * Copyright (c) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
7 * All rights reserved.
9 * This code is derived from software contributed to The NetBSD Foundation
10 * by the Department of Software Engineering, University of Szeged, Hungary
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
34 #include "chfs.h"
37 * chfs_scan_make_vnode_cache - makes a new vnode cache during scan
38 * This function returns a vnode cache belonging to @vno.
40 struct chfs_vnode_cache *
41 chfs_scan_make_vnode_cache(struct chfs_mount *chmp, ino_t vno)
43 struct chfs_vnode_cache *vc;
45 KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
47 /* vnode cache already exists */
48 vc = chfs_vnode_cache_get(chmp, vno);
49 if (vc) {
50 return vc;
53 /* update max vnode number if needed */
54 if (vno > chmp->chm_max_vno) {
55 chmp->chm_max_vno = vno;
58 /* create new vnode cache */
59 vc = chfs_vnode_cache_alloc(vno);
61 chfs_vnode_cache_add(chmp, vc);
63 if (vno == CHFS_ROOTINO) {
64 vc->nlink = 2;
65 vc->pvno = CHFS_ROOTINO;
66 vc->state = VNO_STATE_CHECKEDABSENT;
69 return vc;
73 * chfs_scan_check_node_hdr - checks node magic and crc
74 * Returns 0 if everything is OK, error code otherwise.
76 int
77 chfs_scan_check_node_hdr(struct chfs_flash_node_hdr *nhdr)
79 uint16_t magic;
80 uint32_t crc, hdr_crc;
82 magic = le16toh(nhdr->magic);
84 if (magic != CHFS_FS_MAGIC_BITMASK) {
85 dbg("bad magic\n");
86 return CHFS_NODE_BADMAGIC;
89 hdr_crc = le32toh(nhdr->hdr_crc);
90 crc = crc32(0, (uint8_t *)nhdr, CHFS_NODE_HDR_SIZE - 4);
92 if (crc != hdr_crc) {
93 dbg("bad crc\n");
94 return CHFS_NODE_BADCRC;
97 return CHFS_NODE_OK;
100 /* chfs_scan_check_vnode - check vnode crc and add it to vnode cache */
102 chfs_scan_check_vnode(struct chfs_mount *chmp,
103 struct chfs_eraseblock *cheb, void *buf, off_t ofs)
105 KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
106 struct chfs_vnode_cache *vc;
107 struct chfs_flash_vnode *vnode = buf;
108 struct chfs_node_ref *nref;
109 int err;
110 uint32_t crc;
111 ino_t vno;
113 crc = crc32(0, (uint8_t *)vnode,
114 sizeof(struct chfs_flash_vnode) - 4);
116 /* check node crc */
117 if (crc != le32toh(vnode->node_crc)) {
118 err = chfs_update_eb_dirty(chmp,
119 cheb, le32toh(vnode->length));
120 if (err) {
121 return err;
124 return CHFS_NODE_BADCRC;
127 vno = le64toh(vnode->vno);
129 /* find the corresponding vnode cache */
130 mutex_enter(&chmp->chm_lock_vnocache);
131 vc = chfs_vnode_cache_get(chmp, vno);
132 if (!vc) {
133 vc = chfs_scan_make_vnode_cache(chmp, vno);
134 if (!vc) {
135 mutex_exit(&chmp->chm_lock_vnocache);
136 return ENOMEM;
140 nref = chfs_alloc_node_ref(cheb);
142 nref->nref_offset = ofs;
144 KASSERT(nref->nref_lnr == cheb->lnr);
146 /* check version of vnode */
147 if ((struct chfs_vnode_cache *)vc->v != vc) {
148 if (le64toh(vnode->version) > *vc->vno_version) {
149 *vc->vno_version = le64toh(vnode->version);
150 chfs_add_vnode_ref_to_vc(chmp, vc, nref);
151 } else {
152 err = chfs_update_eb_dirty(chmp, cheb,
153 sizeof(struct chfs_flash_vnode));
154 return CHFS_NODE_OK;
156 } else {
157 vc->vno_version = kmem_alloc(sizeof(uint64_t), KM_SLEEP);
158 if (!vc->vno_version)
159 return ENOMEM;
160 *vc->vno_version = le64toh(vnode->version);
161 chfs_add_vnode_ref_to_vc(chmp, vc, nref);
163 mutex_exit(&chmp->chm_lock_vnocache);
165 /* update sizes */
166 mutex_enter(&chmp->chm_lock_sizes);
167 chfs_change_size_free(chmp, cheb, -le32toh(vnode->length));
168 chfs_change_size_used(chmp, cheb, le32toh(vnode->length));
169 mutex_exit(&chmp->chm_lock_sizes);
171 KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
173 KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
175 return CHFS_NODE_OK;
178 /* chfs_scan_mark_dirent_obsolete - marks a directory entry "obsolete" */
180 chfs_scan_mark_dirent_obsolete(struct chfs_mount *chmp,
181 struct chfs_vnode_cache *vc, struct chfs_dirent *fd)
183 struct chfs_eraseblock *cheb;
184 struct chfs_node_ref *prev, *nref;
186 nref = fd->nref;
187 cheb = &chmp->chm_blocks[fd->nref->nref_lnr];
189 /* remove dirent's node ref from vnode cache */
190 prev = vc->dirents;
191 if (prev && prev == nref) {
192 vc->dirents = prev->nref_next;
193 } else if (prev && prev != (void *)vc) {
194 while (prev->nref_next && prev->nref_next != (void *)vc) {
195 if (prev->nref_next == nref) {
196 prev->nref_next = nref->nref_next;
197 break;
199 prev = prev->nref_next;
203 KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
204 cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
206 return 0;
209 /* chfs_add_fd_to_list - adds a directory entry to its parent's vnode cache */
210 void
211 chfs_add_fd_to_list(struct chfs_mount *chmp,
212 struct chfs_dirent *new, struct chfs_vnode_cache *pvc)
214 KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
215 int size;
216 struct chfs_eraseblock *cheb, *oldcheb;
217 struct chfs_dirent *fd, *tmpfd;
219 dbg("adding fd to list: %s\n", new->name);
221 /* update highest version if needed */
222 if ((new->version > pvc->highest_version))
223 pvc->highest_version = new->version;
225 size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) +
226 new->nsize);
227 cheb = &chmp->chm_blocks[new->nref->nref_lnr];
229 mutex_enter(&chmp->chm_lock_sizes);
230 TAILQ_FOREACH_SAFE(fd, &pvc->scan_dirents, fds, tmpfd) {
231 if (fd->nhash > new->nhash) {
232 /* insert new before fd */
233 TAILQ_INSERT_BEFORE(fd, new, fds);
234 goto out;
235 } else if (fd->nhash == new->nhash &&
236 !strcmp(fd->name, new->name)) {
237 if (new->version > fd->version) {
238 /* replace fd with new */
239 TAILQ_INSERT_BEFORE(fd, new, fds);
240 chfs_change_size_free(chmp, cheb, -size);
241 chfs_change_size_used(chmp, cheb, size);
243 TAILQ_REMOVE(&pvc->scan_dirents, fd, fds);
244 if (fd->nref) {
245 size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) + fd->nsize);
246 chfs_scan_mark_dirent_obsolete(chmp, pvc, fd);
247 oldcheb = &chmp->chm_blocks[fd->nref->nref_lnr];
248 chfs_change_size_used(chmp, oldcheb, -size);
249 chfs_change_size_dirty(chmp, oldcheb, size);
251 chfs_free_dirent(fd);
252 } else {
253 /* new dirent is older */
254 chfs_scan_mark_dirent_obsolete(chmp, pvc, new);
255 chfs_change_size_free(chmp, cheb, -size);
256 chfs_change_size_dirty(chmp, cheb, size);
257 chfs_free_dirent(new);
259 mutex_exit(&chmp->chm_lock_sizes);
260 return;
263 /* if we couldnt fit it elsewhere, lets add to the end */
264 TAILQ_INSERT_TAIL(&pvc->scan_dirents, new, fds);
266 out:
267 /* update sizes */
268 chfs_change_size_free(chmp, cheb, -size);
269 chfs_change_size_used(chmp, cheb, size);
270 mutex_exit(&chmp->chm_lock_sizes);
272 KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
274 KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
277 /* chfs_scan_check_dirent_node - check vnode crc and add to vnode cache */
279 chfs_scan_check_dirent_node(struct chfs_mount *chmp,
280 struct chfs_eraseblock *cheb, void *buf, off_t ofs)
282 int err, namelen;
283 uint32_t crc;
284 struct chfs_dirent *fd;
285 struct chfs_vnode_cache *parentvc;
286 struct chfs_flash_dirent_node *dirent = buf;
288 /* check crc */
289 crc = crc32(0, (uint8_t *)dirent, sizeof(*dirent) - 4);
290 if (crc != le32toh(dirent->node_crc)) {
291 err = chfs_update_eb_dirty(chmp, cheb, le32toh(dirent->length));
292 if (err)
293 return err;
294 return CHFS_NODE_BADCRC;
297 /* allocate space for name */
298 namelen = dirent->nsize;
300 fd = chfs_alloc_dirent(namelen + 1);
301 if (!fd)
302 return ENOMEM;
304 /* allocate an nref */
305 fd->nref = chfs_alloc_node_ref(cheb);
306 if (!fd->nref)
307 return ENOMEM;
309 KASSERT(fd->nref->nref_lnr == cheb->lnr);
311 memcpy(&fd->name, dirent->name, namelen);
312 fd->nsize = namelen;
313 fd->name[namelen] = 0;
314 crc = crc32(0, fd->name, dirent->nsize);
315 if (crc != le32toh(dirent->name_crc)) {
316 chfs_err("Directory entry's name has bad crc: read: 0x%x, "
317 "calculated: 0x%x\n", le32toh(dirent->name_crc), crc);
318 chfs_free_dirent(fd);
319 err = chfs_update_eb_dirty(chmp, cheb, le32toh(dirent->length));
320 if (err)
321 return err;
322 return CHFS_NODE_BADNAMECRC;
325 /* check vnode_cache of parent node */
326 mutex_enter(&chmp->chm_lock_vnocache);
327 parentvc = chfs_scan_make_vnode_cache(chmp, le64toh(dirent->pvno));
328 if (!parentvc) {
329 chfs_free_dirent(fd);
330 return ENOMEM;
333 fd->nref->nref_offset = ofs;
335 dbg("add dirent to #%llu\n", (unsigned long long)parentvc->vno);
336 chfs_add_node_to_list(chmp, parentvc, fd->nref, &parentvc->dirents);
337 mutex_exit(&chmp->chm_lock_vnocache);
339 fd->vno = le64toh(dirent->vno);
340 fd->version = le64toh(dirent->version);
341 fd->nhash = hash32_buf(fd->name, namelen, HASH32_BUF_INIT);
342 fd->type = dirent->dtype;
344 chfs_add_fd_to_list(chmp, fd, parentvc);
346 return CHFS_NODE_OK;
349 /* chfs_scan_check_data_node - check vnode crc and add to vnode cache */
351 chfs_scan_check_data_node(struct chfs_mount *chmp,
352 struct chfs_eraseblock *cheb, void *buf, off_t ofs)
354 KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
355 int err;
356 uint32_t crc, vno;
357 struct chfs_node_ref *nref;
358 struct chfs_vnode_cache *vc;
359 struct chfs_flash_data_node *dnode = buf;
361 /* check crc */
362 crc = crc32(0, (uint8_t *)dnode, sizeof(struct chfs_flash_data_node) - 4);
363 if (crc != le32toh(dnode->node_crc)) {
364 err = chfs_update_eb_dirty(chmp, cheb, le32toh(dnode->length));
365 if (err)
366 return err;
367 return CHFS_NODE_BADCRC;
370 * Don't check data nodes crc and version here, it will be done in
371 * the background GC thread.
373 nref = chfs_alloc_node_ref(cheb);
374 if (!nref)
375 return ENOMEM;
377 nref->nref_offset = CHFS_GET_OFS(ofs) | CHFS_UNCHECKED_NODE_MASK;
379 KASSERT(nref->nref_lnr == cheb->lnr);
381 vno = le64toh(dnode->vno);
382 mutex_enter(&chmp->chm_lock_vnocache);
383 vc = chfs_vnode_cache_get(chmp, vno);
384 if (!vc) {
385 vc = chfs_scan_make_vnode_cache(chmp, vno);
386 if (!vc)
387 return ENOMEM;
389 chfs_add_node_to_list(chmp, vc, nref, &vc->dnode);
390 mutex_exit(&chmp->chm_lock_vnocache);
392 dbg("chmpfree: %u, chebfree: %u, dnode: %u\n", chmp->chm_free_size, cheb->free_size, dnode->length);
394 /* update sizes */
395 mutex_enter(&chmp->chm_lock_sizes);
396 chfs_change_size_free(chmp, cheb, -dnode->length);
397 chfs_change_size_unchecked(chmp, cheb, dnode->length);
398 mutex_exit(&chmp->chm_lock_sizes);
399 return CHFS_NODE_OK;
402 /* chfs_scan_classify_cheb - determine eraseblock's state */
404 chfs_scan_classify_cheb(struct chfs_mount *chmp,
405 struct chfs_eraseblock *cheb)
407 if (cheb->free_size == chmp->chm_ebh->eb_size)
408 return CHFS_BLK_STATE_FREE;
409 else if (cheb->dirty_size < MAX_DIRTY_TO_CLEAN)
410 return CHFS_BLK_STATE_CLEAN;
411 else if (cheb->used_size || cheb->unchecked_size)
412 return CHFS_BLK_STATE_PARTDIRTY;
413 else
414 return CHFS_BLK_STATE_ALLDIRTY;
419 * chfs_scan_eraseblock - scans an eraseblock and looking for nodes
421 * This function scans a whole eraseblock, checks the nodes on it and add them
422 * to the vnode cache.
423 * Returns eraseblock state on success, error code if fails.
426 chfs_scan_eraseblock(struct chfs_mount *chmp,
427 struct chfs_eraseblock *cheb)
429 int err;
430 size_t len, retlen;
431 off_t ofs = 0;
432 int lnr = cheb->lnr;
433 u_char *buf;
434 struct chfs_flash_node_hdr *nhdr;
435 int read_free = 0;
436 struct chfs_node_ref *nref;
438 dbg("scanning eraseblock content: %d free_size: %d\n", cheb->lnr, cheb->free_size);
439 dbg("scanned physical block: %d\n", chmp->chm_ebh->lmap[lnr]);
440 buf = kmem_alloc(CHFS_MAX_NODE_SIZE, KM_SLEEP);
442 while((ofs + CHFS_NODE_HDR_SIZE) < chmp->chm_ebh->eb_size) {
443 memset(buf, 0 , CHFS_MAX_NODE_SIZE);
444 err = chfs_read_leb(chmp,
445 lnr, buf, ofs, CHFS_NODE_HDR_SIZE, &retlen);
446 if (err)
447 goto err_return;
449 if (retlen != CHFS_NODE_HDR_SIZE) {
450 chfs_err("Error reading node header: "
451 "read: %zu instead of: %zu\n",
452 CHFS_NODE_HDR_SIZE, retlen);
453 err = EIO;
454 goto err_return;
457 /* first we check if the buffer we read is full with 0xff, if yes maybe
458 * the blocks remaining area is free. We increase read_free and if it
459 * reaches MAX_READ_FREE we stop reading the block */
460 if (check_pattern(buf, 0xff, 0, CHFS_NODE_HDR_SIZE)) {
461 read_free += CHFS_NODE_HDR_SIZE;
462 if (read_free >= MAX_READ_FREE(chmp)) {
463 dbg("rest of the block is free. Size: %d\n", cheb->free_size);
464 kmem_free(buf, CHFS_MAX_NODE_SIZE);
465 return chfs_scan_classify_cheb(chmp, cheb);
467 ofs += CHFS_NODE_HDR_SIZE;
468 continue;
469 } else {
470 chfs_update_eb_dirty(chmp, cheb, read_free);
471 read_free = 0;
474 nhdr = (struct chfs_flash_node_hdr *)buf;
476 err = chfs_scan_check_node_hdr(nhdr);
477 if (err) {
478 dbg("node hdr error\n");
479 err = chfs_update_eb_dirty(chmp, cheb, 4);
480 if (err)
481 goto err_return;
483 ofs += 4;
484 continue;
486 ofs += CHFS_NODE_HDR_SIZE;
487 if (ofs > chmp->chm_ebh->eb_size) {
488 chfs_err("Second part of node is on the next eraseblock.\n");
489 err = EIO;
490 goto err_return;
492 switch (le16toh(nhdr->type)) {
493 case CHFS_NODETYPE_VNODE:
494 /* vnode information */
495 /* read up the node */
496 len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
497 err = chfs_read_leb(chmp,
498 lnr, buf + CHFS_NODE_HDR_SIZE,
499 ofs, len, &retlen);
500 if (err)
501 goto err_return;
503 if (retlen != len) {
504 chfs_err("Error reading vnode: read: %zu instead of: %zu\n",
505 len, retlen);
506 err = EIO;
507 goto err_return;
509 KASSERT(lnr == cheb->lnr);
510 err = chfs_scan_check_vnode(chmp,
511 cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
512 if (err)
513 goto err_return;
515 break;
516 case CHFS_NODETYPE_DIRENT:
517 /* directory entry */
518 /* read up the node */
519 len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
521 err = chfs_read_leb(chmp,
522 lnr, buf + CHFS_NODE_HDR_SIZE,
523 ofs, len, &retlen);
524 if (err)
525 goto err_return;
527 if (retlen != len) {
528 chfs_err("Error reading dirent node: read: %zu "
529 "instead of: %zu\n", len, retlen);
530 err = EIO;
531 goto err_return;
534 KASSERT(lnr == cheb->lnr);
536 err = chfs_scan_check_dirent_node(chmp,
537 cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
538 if (err)
539 goto err_return;
541 break;
542 case CHFS_NODETYPE_DATA:
543 /* data node */
544 len = sizeof(struct chfs_flash_data_node) -
545 CHFS_NODE_HDR_SIZE;
546 err = chfs_read_leb(chmp,
547 lnr, buf + CHFS_NODE_HDR_SIZE,
548 ofs, len, &retlen);
549 if (err)
550 goto err_return;
552 if (retlen != len) {
553 chfs_err("Error reading data node: read: %zu "
554 "instead of: %zu\n", len, retlen);
555 err = EIO;
556 goto err_return;
558 KASSERT(lnr == cheb->lnr);
559 err = chfs_scan_check_data_node(chmp,
560 cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
561 if (err)
562 goto err_return;
564 break;
565 case CHFS_NODETYPE_PADDING:
566 /* padding node, set size and update dirty */
567 nref = chfs_alloc_node_ref(cheb);
568 nref->nref_offset = ofs - CHFS_NODE_HDR_SIZE;
569 nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
570 CHFS_OBSOLETE_NODE_MASK;
572 err = chfs_update_eb_dirty(chmp, cheb,
573 le32toh(nhdr->length));
574 if (err)
575 goto err_return;
577 break;
578 default:
579 /* unknown node type, update dirty and skip */
580 err = chfs_update_eb_dirty(chmp, cheb,
581 le32toh(nhdr->length));
582 if (err)
583 goto err_return;
585 break;
587 ofs += le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
590 KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
591 cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
593 err = chfs_scan_classify_cheb(chmp, cheb);
594 /* FALLTHROUGH */
595 err_return:
596 kmem_free(buf, CHFS_MAX_NODE_SIZE);
597 return err;