/*	$NetBSD: chfs_scan.c,v 1.6 2015/02/07 04:19:52 christos Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (c) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "chfs.h"
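/*
 * Scanning support for mount time.  The routines below read every
 * eraseblock, validate the node headers found on it and rebuild the
 * in-memory state from the surviving nodes: vnode caches, directory
 * entry lists and the per-eraseblock size accounting.
 */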
/*
 * chfs_scan_make_vnode_cache - makes a new vnode cache during scan
 * This function returns the vnode cache belonging to @vno, creating it if
 * it doesn't exist yet.
 */
struct chfs_vnode_cache *
chfs_scan_make_vnode_cache(struct chfs_mount *chmp, ino_t vno)
{
	struct chfs_vnode_cache *vc;

	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	/* vnode cache already exists */
	vc = chfs_vnode_cache_get(chmp, vno);
	if (vc) {
		return vc;
	}

	/* update max vnode number if needed */
	if (vno > chmp->chm_max_vno) {
		chmp->chm_max_vno = vno;
	}

	/* create new vnode cache */
	vc = chfs_vnode_cache_alloc(vno);

	chfs_vnode_cache_add(chmp, vc);

	/* the root directory is its own parent and needs no further check */
	if (vno == CHFS_ROOTINO) {
		vc->nlink = 2;
		vc->pvno = CHFS_ROOTINO;
		vc->state = VNO_STATE_CHECKEDABSENT;
	}

	return vc;
}
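/*
 * Usage sketch: the caller owns chm_lock_vnocache around the
 * lookup-or-create, e.g.
 *
 *	mutex_enter(&chmp->chm_lock_vnocache);
 *	vc = chfs_scan_make_vnode_cache(chmp, vno);
 *	...
 *	mutex_exit(&chmp->chm_lock_vnocache);
 *
 * as chfs_scan_check_vnode() and chfs_scan_check_data_node() do below.
 */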
/*
 * chfs_scan_check_node_hdr - checks node magic and crc
 * Returns 0 if everything is OK, error code otherwise.
 */
int
chfs_scan_check_node_hdr(struct chfs_flash_node_hdr *nhdr)
{
	uint16_t magic;
	uint32_t crc, hdr_crc;

	magic = le16toh(nhdr->magic);

	if (magic != CHFS_FS_MAGIC_BITMASK) {
		dbg("bad magic\n");
		return CHFS_NODE_BADMAGIC;
	}

	hdr_crc = le32toh(nhdr->hdr_crc);
	crc = crc32(0, (uint8_t *)nhdr, CHFS_NODE_HDR_SIZE - 4);

	if (crc != hdr_crc) {
		dbg("bad crc\n");
		return CHFS_NODE_BADCRC;
	}

	return CHFS_NODE_OK;
}
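/*
 * Note: here and in the node checks below the CRC is computed over
 * "size - 4" bytes, i.e. over everything up to the trailing 32-bit
 * CRC field itself.
 */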
/* chfs_scan_check_vnode - check vnode crc and add it to the vnode cache */
int
chfs_scan_check_vnode(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, void *buf, off_t ofs)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	struct chfs_vnode_cache *vc;
	struct chfs_flash_vnode *vnode = buf;
	struct chfs_node_ref *nref;
	int err;
	uint32_t crc;
	ino_t vno;

	crc = crc32(0, (uint8_t *)vnode,
	    sizeof(struct chfs_flash_vnode) - 4);

	/* check node crc */
	if (crc != le32toh(vnode->node_crc)) {
		err = chfs_update_eb_dirty(chmp,
		    cheb, le32toh(vnode->length));
		if (err) {
			return err;
		}

		return CHFS_NODE_BADCRC;
	}

	vno = le64toh(vnode->vno);

	/* find the corresponding vnode cache */
	mutex_enter(&chmp->chm_lock_vnocache);
	vc = chfs_vnode_cache_get(chmp, vno);
	if (!vc) {
		vc = chfs_scan_make_vnode_cache(chmp, vno);
		if (!vc) {
			mutex_exit(&chmp->chm_lock_vnocache);
			return ENOMEM;
		}
	}

	nref = chfs_alloc_node_ref(cheb);

	nref->nref_offset = ofs;

	KASSERT(nref->nref_lnr == cheb->lnr);

	/* check version of vnode */
	if ((struct chfs_vnode_cache *)vc->v != vc) {
		if (le64toh(vnode->version) > *vc->vno_version) {
			*vc->vno_version = le64toh(vnode->version);
			chfs_add_vnode_ref_to_vc(chmp, vc, nref);
		} else {
			/* this node is older than the cached one, mark it dirty */
			err = chfs_update_eb_dirty(chmp, cheb,
			    sizeof(struct chfs_flash_vnode));
			mutex_exit(&chmp->chm_lock_vnocache);
			if (err)
				return err;

			return CHFS_NODE_OK;
		}
	} else {
		/* first vnode node found for this cache entry */
		vc->vno_version = kmem_alloc(sizeof(uint64_t), KM_SLEEP);
		if (!vc->vno_version) {
			mutex_exit(&chmp->chm_lock_vnocache);
			return ENOMEM;
		}
		*vc->vno_version = le64toh(vnode->version);
		chfs_add_vnode_ref_to_vc(chmp, vc, nref);
	}
	mutex_exit(&chmp->chm_lock_vnocache);

	/* update sizes */
	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_free(chmp, cheb, -le32toh(vnode->length));
	chfs_change_size_used(chmp, cheb, le32toh(vnode->length));
	mutex_exit(&chmp->chm_lock_sizes);

	KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);

	return CHFS_NODE_OK;
}
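/*
 * Accounting note: every accepted node moves its on-flash footprint from
 * the eraseblock's free space to its used space; rejected or superseded
 * nodes are accounted as dirty instead.  The KASSERTs above spell out the
 * invariant the scan maintains throughout:
 *
 *	used + free + dirty + unchecked + wasted == eb_size
 */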
/* chfs_scan_mark_dirent_obsolete - marks a directory entry "obsolete" */
void
chfs_scan_mark_dirent_obsolete(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc, struct chfs_dirent *fd)
{
	struct chfs_eraseblock *cheb;
	struct chfs_node_ref *prev, *nref;

	nref = fd->nref;
	cheb = &chmp->chm_blocks[fd->nref->nref_lnr];

	/* remove dirent's node ref from vnode cache */
	prev = vc->dirents;
	if (prev && prev == nref) {
		vc->dirents = prev->nref_next;
	} else if (prev && prev != (void *)vc) {
		while (prev->nref_next && prev->nref_next != (void *)vc) {
			if (prev->nref_next == nref) {
				prev->nref_next = nref->nref_next;
				break;
			}
			prev = prev->nref_next;
		}
	}

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
}
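/*
 * Note: the per-vnode node ref lists are singly linked and terminated by
 * a pointer back to the owning vnode cache instead of NULL, which is why
 * the walk above compares prev->nref_next against (void *)vc.
 */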
/* chfs_add_fd_to_list - adds a directory entry to its parent's vnode cache */
void
chfs_add_fd_to_list(struct chfs_mount *chmp,
    struct chfs_dirent *new, struct chfs_vnode_cache *pvc)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	int size;
	struct chfs_eraseblock *cheb, *oldcheb;
	struct chfs_dirent *fd, *tmpfd;

	dbg("adding fd to list: %s\n", new->name);

	/* update highest version if needed */
	if (new->version > pvc->highest_version)
		pvc->highest_version = new->version;

	size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) +
	    new->nsize);
	cheb = &chmp->chm_blocks[new->nref->nref_lnr];

	mutex_enter(&chmp->chm_lock_sizes);
	TAILQ_FOREACH_SAFE(fd, &pvc->scan_dirents, fds, tmpfd) {
		if (fd->nhash > new->nhash) {
			/* insert new before fd */
			TAILQ_INSERT_BEFORE(fd, new, fds);
			goto out;
		} else if (fd->nhash == new->nhash &&
		    !strcmp(fd->name, new->name)) {
			if (new->version > fd->version) {
				/* replace fd with new */
				TAILQ_INSERT_BEFORE(fd, new, fds);
				chfs_change_size_free(chmp, cheb, -size);
				chfs_change_size_used(chmp, cheb, size);

				TAILQ_REMOVE(&pvc->scan_dirents, fd, fds);
				if (fd->nref) {
					size = CHFS_PAD(sizeof(struct chfs_flash_dirent_node) + fd->nsize);
					chfs_scan_mark_dirent_obsolete(chmp, pvc, fd);
					oldcheb = &chmp->chm_blocks[fd->nref->nref_lnr];
					chfs_change_size_used(chmp, oldcheb, -size);
					chfs_change_size_dirty(chmp, oldcheb, size);
				}
				chfs_free_dirent(fd);
			} else {
				/* new dirent is older */
				chfs_scan_mark_dirent_obsolete(chmp, pvc, new);
				chfs_change_size_free(chmp, cheb, -size);
				chfs_change_size_dirty(chmp, cheb, size);
				chfs_free_dirent(new);
			}
			mutex_exit(&chmp->chm_lock_sizes);
			return;
		}
	}

	/* no entry with a greater hash was found, so append to the end */
	TAILQ_INSERT_TAIL(&pvc->scan_dirents, new, fds);

out:
	chfs_change_size_free(chmp, cheb, -size);
	chfs_change_size_used(chmp, cheb, size);
	mutex_exit(&chmp->chm_lock_sizes);

	KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);
}
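/*
 * The scan_dirents list is kept sorted by name hash, so the loop above can
 * insert at the first entry with a greater hash.  When two entries collide
 * on both hash and name they describe the same directory entry: the one
 * with the higher version wins and the loser's flash space is marked dirty.
 */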
/*
 * chfs_scan_check_dirent_node - check dirent node crc and add it to the
 * parent's vnode cache
 */
int
chfs_scan_check_dirent_node(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, void *buf, off_t ofs)
{
	int err, namelen;
	uint32_t crc;
	struct chfs_dirent *fd;
	struct chfs_vnode_cache *parentvc;
	struct chfs_flash_dirent_node *dirent = buf;

	/* check crc */
	crc = crc32(0, (uint8_t *)dirent, sizeof(*dirent) - 4);
	if (crc != le32toh(dirent->node_crc)) {
		err = chfs_update_eb_dirty(chmp, cheb, le32toh(dirent->length));
		if (err)
			return err;

		return CHFS_NODE_BADCRC;
	}

	/* allocate space for name */
	namelen = dirent->nsize;

	fd = chfs_alloc_dirent(namelen + 1);
	if (!fd)
		return ENOMEM;

	/* allocate an nref */
	fd->nref = chfs_alloc_node_ref(cheb);

	KASSERT(fd->nref->nref_lnr == cheb->lnr);

	/* copy the name and check its crc, too */
	memcpy(&fd->name, dirent->name, namelen);
	fd->nsize = namelen;
	fd->name[namelen] = 0;
	crc = crc32(0, fd->name, dirent->nsize);
	if (crc != le32toh(dirent->name_crc)) {
		chfs_err("Directory entry's name has bad crc: read: 0x%x, "
		    "calculated: 0x%x\n", le32toh(dirent->name_crc), crc);
		chfs_free_dirent(fd);
		err = chfs_update_eb_dirty(chmp, cheb, le32toh(dirent->length));
		if (err)
			return err;

		return CHFS_NODE_BADNAMECRC;
	}

	/* check vnode_cache of parent node */
	mutex_enter(&chmp->chm_lock_vnocache);
	parentvc = chfs_scan_make_vnode_cache(chmp, le64toh(dirent->pvno));
	if (!parentvc) {
		mutex_exit(&chmp->chm_lock_vnocache);
		chfs_free_dirent(fd);
		return ENOMEM;
	}

	fd->nref->nref_offset = ofs;

	dbg("add dirent to #%llu\n", (unsigned long long)parentvc->vno);
	chfs_add_node_to_list(chmp, parentvc, fd->nref, &parentvc->dirents);
	mutex_exit(&chmp->chm_lock_vnocache);

	fd->vno = le64toh(dirent->vno);
	fd->version = le64toh(dirent->version);
	fd->nhash = hash32_buf(fd->name, namelen, HASH32_BUF_INIT);
	fd->type = dirent->dtype;

	chfs_add_fd_to_list(chmp, fd, parentvc);

	return CHFS_NODE_OK;
}
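/*
 * Note: a dirent node carries two independent checksums: node_crc over the
 * fixed-size part and name_crc over the variable-length name.  A corrupt
 * name is therefore rejected (CHFS_NODE_BADNAMECRC) even when the fixed
 * part is intact.
 */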
/*
 * chfs_scan_check_data_node - check data node crc and add it to the
 * vnode cache
 */
int
chfs_scan_check_data_node(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, void *buf, off_t ofs)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	int err;
	uint32_t crc;
	ino_t vno;
	struct chfs_node_ref *nref;
	struct chfs_vnode_cache *vc;
	struct chfs_flash_data_node *dnode = buf;

	/* check crc */
	crc = crc32(0, (uint8_t *)dnode, sizeof(struct chfs_flash_data_node) - 4);
	if (crc != le32toh(dnode->node_crc)) {
		err = chfs_update_eb_dirty(chmp, cheb, le32toh(dnode->length));
		if (err)
			return err;

		return CHFS_NODE_BADCRC;
	}
	/*
	 * Don't check the data node's payload crc and version here, it will
	 * be done by the background GC thread.
	 */
	nref = chfs_alloc_node_ref(cheb);

	nref->nref_offset = CHFS_GET_OFS(ofs) | CHFS_UNCHECKED_NODE_MASK;

	KASSERT(nref->nref_lnr == cheb->lnr);

	vno = le64toh(dnode->vno);
	mutex_enter(&chmp->chm_lock_vnocache);
	vc = chfs_vnode_cache_get(chmp, vno);
	if (!vc) {
		vc = chfs_scan_make_vnode_cache(chmp, vno);
		if (!vc) {
			mutex_exit(&chmp->chm_lock_vnocache);
			return ENOMEM;
		}
	}
	chfs_add_node_to_list(chmp, vc, nref, &vc->dnode);
	mutex_exit(&chmp->chm_lock_vnocache);

	dbg("chmpfree: %u, chebfree: %u, dnode: %u\n", chmp->chm_free_size,
	    cheb->free_size, le32toh(dnode->length));

	/* update sizes */
	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_free(chmp, cheb, -le32toh(dnode->length));
	chfs_change_size_unchecked(chmp, cheb, le32toh(dnode->length));
	mutex_exit(&chmp->chm_lock_sizes);

	return CHFS_NODE_OK;
}
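/*
 * Deferred checking: the payload of a data node is not verified at mount
 * time.  The node ref is tagged with CHFS_UNCHECKED_NODE_MASK and its
 * space is accounted as "unchecked" rather than "used" until the GC
 * thread verifies the node.
 */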
/* chfs_scan_classify_cheb - determine eraseblock's state */
int
chfs_scan_classify_cheb(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	if (cheb->free_size == chmp->chm_ebh->eb_size)
		return CHFS_BLK_STATE_FREE;
	else if (cheb->dirty_size < MAX_DIRTY_TO_CLEAN)
		return CHFS_BLK_STATE_CLEAN;
	else if (cheb->used_size || cheb->unchecked_size)
		return CHFS_BLK_STATE_PARTDIRTY;
	else
		return CHFS_BLK_STATE_ALLDIRTY;
}
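/*
 * Classification summary, evaluated in order:
 *
 *	completely free			-> CHFS_BLK_STATE_FREE
 *	dirty < MAX_DIRTY_TO_CLEAN	-> CHFS_BLK_STATE_CLEAN
 *	any used or unchecked space	-> CHFS_BLK_STATE_PARTDIRTY
 *	otherwise			-> CHFS_BLK_STATE_ALLDIRTY
 *
 * The order matters: a block with only a little dirt still counts as
 * clean even if it also holds live data.
 */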
/*
 * chfs_scan_eraseblock - scans an eraseblock, looking for nodes
 *
 * This function scans a whole eraseblock, checks the nodes on it and adds
 * them to the vnode cache.
 * Returns the eraseblock's state on success, an error code otherwise.
 */
int
chfs_scan_eraseblock(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	int err;
	int lnr = cheb->lnr;
	off_t ofs = 0;
	size_t len, retlen;
	uint8_t *buf;
	struct chfs_flash_node_hdr *nhdr;
	int read_free = 0;
	struct chfs_node_ref *nref;

	dbg("scanning eraseblock content: %d free_size: %d\n",
	    cheb->lnr, cheb->free_size);
	dbg("scanned physical block: %d\n", chmp->chm_ebh->lmap[lnr]);
	buf = kmem_alloc(CHFS_MAX_NODE_SIZE, KM_SLEEP);

	while ((ofs + CHFS_NODE_HDR_SIZE) < chmp->chm_ebh->eb_size) {
		memset(buf, 0, CHFS_MAX_NODE_SIZE);
		err = chfs_read_leb(chmp,
		    lnr, buf, ofs, CHFS_NODE_HDR_SIZE, &retlen);
		if (err)
			goto err_return;

		if (retlen != CHFS_NODE_HDR_SIZE) {
			chfs_err("Error reading node header: "
			    "read: %zu instead of: %zu\n",
			    CHFS_NODE_HDR_SIZE, retlen);
			err = EIO;
			goto err_return;
		}

		/*
		 * First check whether the buffer we read is full of 0xff;
		 * if so, the block's remaining area may be free.  Increase
		 * read_free and stop reading the block once it reaches
		 * MAX_READ_FREE.
		 */
		if (check_pattern(buf, 0xff, 0, CHFS_NODE_HDR_SIZE)) {
			read_free += CHFS_NODE_HDR_SIZE;
			if (read_free >= MAX_READ_FREE(chmp)) {
				dbg("rest of the block is free. Size: %d\n",
				    cheb->free_size);
				kmem_free(buf, CHFS_MAX_NODE_SIZE);
				return chfs_scan_classify_cheb(chmp, cheb);
			}
			ofs += CHFS_NODE_HDR_SIZE;
			continue;
		}

		chfs_update_eb_dirty(chmp, cheb, read_free);
		read_free = 0;
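		/*
		 * Note: a 0xff run that turned out to be followed by real
		 * data was padding, not free space; it has just been folded
		 * into the eraseblock's dirty size and the free-run counter
		 * starts over.
		 */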
		nhdr = (struct chfs_flash_node_hdr *)buf;

		err = chfs_scan_check_node_hdr(nhdr);
		if (err) {
			dbg("node hdr error\n");
			err = chfs_update_eb_dirty(chmp, cheb, 4);
			if (err)
				goto err_return;

			ofs += 4;
			continue;
		}

		ofs += CHFS_NODE_HDR_SIZE;
		if (ofs > chmp->chm_ebh->eb_size) {
			chfs_err("Second part of node is on the next eraseblock.\n");
			err = EIO;
			goto err_return;
		}
		switch (le16toh(nhdr->type)) {
		case CHFS_NODETYPE_VNODE:
			/* vnode information */
			/* read up the node */
			len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
			err = chfs_read_leb(chmp,
			    lnr, buf + CHFS_NODE_HDR_SIZE,
			    ofs, len, &retlen);
			if (err)
				goto err_return;

			if (retlen != len) {
				chfs_err("Error reading vnode: read: %zu instead of: %zu\n",
				    len, retlen);
				err = EIO;
				goto err_return;
			}

			KASSERT(lnr == cheb->lnr);
			err = chfs_scan_check_vnode(chmp,
			    cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
			if (err)
				goto err_return;

			break;
		case CHFS_NODETYPE_DIRENT:
			/* directory entry */
			/* read up the node */
			len = le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;

			err = chfs_read_leb(chmp,
			    lnr, buf + CHFS_NODE_HDR_SIZE,
			    ofs, len, &retlen);
			if (err)
				goto err_return;

			if (retlen != len) {
				chfs_err("Error reading dirent node: read: %zu "
				    "instead of: %zu\n", len, retlen);
				err = EIO;
				goto err_return;
			}

			KASSERT(lnr == cheb->lnr);

			err = chfs_scan_check_dirent_node(chmp,
			    cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
			if (err)
				goto err_return;

			break;
		case CHFS_NODETYPE_DATA:
			/*
			 * data node: only its fixed-size header part is read
			 * here, the payload is checked later by the GC
			 */
			len = sizeof(struct chfs_flash_data_node) -
			    CHFS_NODE_HDR_SIZE;
			err = chfs_read_leb(chmp,
			    lnr, buf + CHFS_NODE_HDR_SIZE,
			    ofs, len, &retlen);
			if (err)
				goto err_return;

			if (retlen != len) {
				chfs_err("Error reading data node: read: %zu "
				    "instead of: %zu\n", len, retlen);
				err = EIO;
				goto err_return;
			}

			KASSERT(lnr == cheb->lnr);
			err = chfs_scan_check_data_node(chmp,
			    cheb, buf, ofs - CHFS_NODE_HDR_SIZE);
			if (err)
				goto err_return;

			break;
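		/*
		 * Padding nodes fill space that could not hold a real node,
		 * typically the tail of an eraseblock; they carry no payload
		 * and are obsolete from birth, hence the mask below.
		 */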
		case CHFS_NODETYPE_PADDING:
			/* padding node, set size and update dirty */
			nref = chfs_alloc_node_ref(cheb);
			nref->nref_offset = ofs - CHFS_NODE_HDR_SIZE;
			nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
			    CHFS_OBSOLETE_NODE_MASK;

			err = chfs_update_eb_dirty(chmp, cheb,
			    le32toh(nhdr->length));
			if (err)
				goto err_return;

			break;
		default:
			/* unknown node type, update dirty and skip */
			err = chfs_update_eb_dirty(chmp, cheb,
			    le32toh(nhdr->length));
			if (err)
				goto err_return;

			break;
		}
		ofs += le32toh(nhdr->length) - CHFS_NODE_HDR_SIZE;
	}

	KASSERT(cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size == chmp->chm_ebh->eb_size);

	err = chfs_scan_classify_cheb(chmp, cheb);
	/* FALLTHROUGH: on success, err holds the eraseblock's state */
err_return:
	kmem_free(buf, CHFS_MAX_NODE_SIZE);
	return err;
}