/*	$NetBSD: chfs_readinode.c,v 1.9 2014/09/01 16:31:17 he Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "chfs.h"

/* tmp node operations */
int chfs_check_td_data(struct chfs_mount *,
    struct chfs_tmp_dnode *);
int chfs_check_td_node(struct chfs_mount *,
    struct chfs_tmp_dnode *);
struct chfs_node_ref *chfs_first_valid_data_ref(struct chfs_node_ref *);
int chfs_add_tmp_dnode_to_tree(struct chfs_mount *,
    struct chfs_readinode_info *,
    struct chfs_tmp_dnode *);
void chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
void chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
static void chfs_kill_td(struct chfs_mount *,
    struct chfs_tmp_dnode *);
static void chfs_kill_tdi(struct chfs_mount *,
    struct chfs_tmp_dnode_info *);

/* frag node operations */
struct chfs_node_frag *new_fragment(struct chfs_full_dnode *,
    uint32_t, uint32_t);
int no_overlapping_node(struct rb_tree *, struct chfs_node_frag *,
    struct chfs_node_frag *, uint32_t);
int chfs_add_frag_to_fragtree(struct chfs_mount *, struct rb_tree *,
    struct chfs_node_frag *);
void chfs_obsolete_node_frag(struct chfs_mount *,
    struct chfs_node_frag *);

/* general node operations */
int chfs_get_data_nodes(struct chfs_mount *, struct chfs_inode *,
    struct chfs_readinode_info *);
int chfs_build_fragtree(struct chfs_mount *, struct chfs_inode *,
    struct chfs_readinode_info *);

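/*
 * Reading an inode back from flash is a two-phase process: chfs_get_data_nodes()
 * collects every valid data node of the inode into a temporary rb-tree of
 * chfs_tmp_dnode_info descriptors keyed by node offset, then
 * chfs_build_fragtree() resolves overlaps between node versions and turns the
 * surviving nodes into the chfs_node_frag entries of the inode's fragtree,
 * which chfs_read_data() later uses to serve reads.
 */
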
/* tmp node rbtree operations */
static signed int
tmp_node_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const struct chfs_tmp_dnode_info *tdi1 = n1;
	const struct chfs_tmp_dnode_info *tdi2 = n2;

	return (tdi1->tmpnode->node->ofs - tdi2->tmpnode->node->ofs);
}

static signed int
tmp_node_compare_key(void *ctx, const void *n, const void *key)
{
	const struct chfs_tmp_dnode_info *tdi = n;
	uint64_t ofs = *(const uint64_t *)key;

	return (tdi->tmpnode->node->ofs - ofs);
}

const rb_tree_ops_t tmp_node_rbtree_ops = {
	.rbto_compare_nodes = tmp_node_compare_nodes,
	.rbto_compare_key = tmp_node_compare_key,
	.rbto_node_offset = offsetof(struct chfs_tmp_dnode_info, rb_node),
};

/* frag node rbtree operations */
static signed int
frag_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const struct chfs_node_frag *frag1 = n1;
	const struct chfs_node_frag *frag2 = n2;

	return (frag1->ofs - frag2->ofs);
}

static signed int
frag_compare_key(void *ctx, const void *n, const void *key)
{
	const struct chfs_node_frag *frag = n;
	uint64_t ofs = *(const uint64_t *)key;

	return (frag->ofs - ofs);
}

const rb_tree_ops_t frag_rbtree_ops = {
	.rbto_compare_nodes = frag_compare_nodes,
	.rbto_compare_key = frag_compare_key,
	.rbto_node_offset = offsetof(struct chfs_node_frag, rb_node),
};

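/*
 * Both ops vectors above plug into NetBSD's rb_tree(3) API: they provide the
 * comparators and the offset of the embedded rb_node member.  A minimal sketch
 * of how such a tree is driven elsewhere in this file (the variable names in
 * this snippet are illustrative only):
 *
 *	struct rb_tree fragtree;
 *	uint64_t key = 4096;
 *	struct chfs_node_frag *frag;
 *
 *	rb_tree_init(&fragtree, &frag_rbtree_ops);
 *	rb_tree_insert_node(&fragtree, newfrag);
 *	frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&fragtree, &key);
 *
 * Note that the key comparators dereference a pointer to the key, which is
 * why callers pass the address of an offset rather than its value.
 */
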
/*
 * chfs_check_td_data - checks the data CRC of the node
 *
 * Returns: 0 - if everything is OK;
 *	    1 - if the CRC is incorrect;
 *	    error code if an error occurred.
 */
int
chfs_check_td_data(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
{
	int err;
	size_t retlen, len, totlen;
	uint32_t crc, ofs;
	char *buf;
	struct chfs_node_ref *nref = td->node->nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	ofs = CHFS_GET_OFS(nref->nref_offset) + sizeof(struct chfs_flash_data_node);
	len = td->node->size;

	/* Read the data area of the node from flash. */
	buf = kmem_alloc(len, KM_SLEEP);
	if (!buf) {
		dbg("allocating error\n");
		return ENOMEM;
	}

	err = chfs_read_leb(chmp, nref->nref_lnr, buf, ofs, len, &retlen);
	if (err) {
		dbg("error while reading: %d\n", err);
		kmem_free(buf, len);
		return err;
	}

	if (len != retlen) {
		dbg("len:%zu, retlen:%zu\n", len, retlen);
		kmem_free(buf, len);
		return EIO;
	}

	/* Check the data CRC. */
	crc = crc32(0, (uint8_t *)buf, len);
	kmem_free(buf, len);

	if (crc != td->data_crc) {
		dbg("crc failed, calculated: 0x%x, orig: 0x%x\n", crc, td->data_crc);
		return 1;
	}

	/* The node is valid: account its size as used space. */
	CHFS_MARK_REF_NORMAL(nref);
	totlen = CHFS_PAD(sizeof(struct chfs_flash_data_node) + len);

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_unchecked(chmp, &chmp->chm_blocks[nref->nref_lnr], -totlen);
	chfs_change_size_used(chmp, &chmp->chm_blocks[nref->nref_lnr], totlen);
	mutex_exit(&chmp->chm_lock_sizes);
	KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);

	return 0;
}

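/*
 * Note that a successful data CRC check is not a pure query: it also moves the
 * node's space from the "unchecked" to the "used" counter of its erase block
 * (under chm_lock_sizes) and marks the nref as normal.
 */
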
/* chfs_check_td_node - checks a temporary node */
int
chfs_check_td_node(struct chfs_mount *chmp, struct chfs_tmp_dnode *td)
{
	int ret;

	if (CHFS_REF_FLAGS(td->node->nref) != CHFS_UNCHECKED_NODE_MASK)
		return 0;

	ret = chfs_check_td_data(chmp, td);
	return ret;
}

/*
 * chfs_first_valid_data_ref -
 * returns the first valid nref after the given nref
 */
struct chfs_node_ref *
chfs_first_valid_data_ref(struct chfs_node_ref *nref)
{
	while (nref) {
		if (!CHFS_REF_OBSOLETE(nref)) {
			if (nref->nref_lnr == REF_EMPTY_NODE) {
				dbg("FIRST VALID IS EMPTY!\n");
				return NULL;
			}
			return nref;
		}

		if (nref->nref_next) {
			nref = nref->nref_next;
		} else {
			break;
		}
	}
	return NULL;
}

/*
 * chfs_add_tmp_dnode_to_tdi -
 * adds a temporary node to a temporary node descriptor
 */
void
chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
{
	if (!tdi->tmpnode) {
		/* The chain is empty. */
		tdi->tmpnode = td;
	} else {
		/* Insert into the chain. */
		struct chfs_tmp_dnode *tmp = tdi->tmpnode;
		while (tmp->next) {
			tmp = tmp->next;
		}
		tmp->next = td;
	}
}

/*
 * chfs_remove_tmp_dnode_from_tdi -
 * removes a temporary node from its descriptor
 */
void
chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
{
	if (tdi->tmpnode == td) {
		/* It's the first in the chain. */
		tdi->tmpnode = tdi->tmpnode->next;
	} else {
		/* Remove from the middle of the chain. */
		struct chfs_tmp_dnode *tmp = tdi->tmpnode->next;
		while (tmp->next && tmp->next != td) {
			tmp = tmp->next;
		}
		tmp->next = td->next;
	}
}

/* chfs_kill_td - removes all components of a temporary node */
static void
chfs_kill_td(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
{
	struct chfs_vnode_cache *vc;

	mutex_enter(&chmp->chm_lock_vnocache);
	/* Remove the node from the vnode cache's data node chain. */
	vc = chfs_nref_to_vc(td->node->nref);
	chfs_remove_and_obsolete(chmp, vc, td->node->nref, &vc->dnode);
	mutex_exit(&chmp->chm_lock_vnocache);

	chfs_free_tmp_dnode(td);
}

/* chfs_kill_tdi - removes a temporary node descriptor */
static void
chfs_kill_tdi(struct chfs_mount *chmp,
    struct chfs_tmp_dnode_info *tdi)
{
	struct chfs_tmp_dnode *next, *tmp = tdi->tmpnode;

	/* Iterate the chain and remove all temporary nodes from it. */
	while (tmp) {
		next = tmp->next;
		chfs_kill_td(chmp, tmp);
		tmp = next;
	}

	chfs_free_tmp_dnode_info(tdi);
}

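/*
 * A chfs_tmp_dnode_info (tdi) is a tree node that may carry a chain of
 * chfs_tmp_dnode (td) entries sharing the same offset.  chfs_add_tmp_dnode_to_tdi()
 * appends to that chain, chfs_remove_tmp_dnode_from_tdi() unlinks a single
 * entry, and chfs_kill_td()/chfs_kill_tdi() release one entry or the whole
 * descriptor, respectively.
 */
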
/*
 * chfs_add_tmp_dnode_to_tree -
 * adds a temporary node to the temporary tree
 */
int
chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp,
    struct chfs_readinode_info *rii,
    struct chfs_tmp_dnode *newtd)
{
	uint64_t end_ofs = newtd->node->ofs + newtd->node->size;
	struct chfs_tmp_dnode_info *this;
	struct rb_node *node, *prev_node;
	struct chfs_tmp_dnode_info *newtdi;

	node = rb_tree_find_node(&rii->tdi_root, &newtd->node->ofs);
	if (node) {
		/* Walk back to the first descriptor overlapping this offset. */
		this = (struct chfs_tmp_dnode_info *)node;
		while (this->tmpnode->overlapped) {
			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			if (!prev_node) {
				this->tmpnode->overlapped = 0;
				break;
			}
			node = prev_node;
			this = (struct chfs_tmp_dnode_info *)node;
		}
	}

	while (node) {
		this = (struct chfs_tmp_dnode_info *)node;
		if (this->tmpnode->node->ofs > end_ofs)
			break;

		struct chfs_tmp_dnode *tmp_td = this->tmpnode;
		while (tmp_td) {
			if (tmp_td->version == newtd->version) {
				/* This is a new version of an old node. */
				if (!chfs_check_td_node(chmp, tmp_td)) {
					dbg("calling kill td 0\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				}
				chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
				chfs_kill_td(chmp, tmp_td);
				chfs_add_tmp_dnode_to_tdi(this, newtd);
				return 0;
			}
			if (tmp_td->version < newtd->version &&
			    tmp_td->node->ofs >= newtd->node->ofs &&
			    tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
				/* New node entirely overlaps 'this' */
				if (chfs_check_td_node(chmp, newtd)) {
					dbg("calling kill td 2\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				}
				/* ... and is good. Kill 'this' and any subsequent
				   nodes which are also overlapped */
				while (tmp_td && tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
					struct rb_node *next = rb_tree_iterate(&rii->tdi_root, this, RB_DIR_RIGHT);
					struct chfs_tmp_dnode_info *next_tdi = (struct chfs_tmp_dnode_info *)next;
					struct chfs_tmp_dnode *next_td = NULL;

					if (tmp_td->next) {
						next_td = tmp_td->next;
					} else if (next_tdi) {
						next_td = next_tdi->tmpnode;
					}

					if (tmp_td->version < newtd->version) {
						chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
						chfs_kill_td(chmp, tmp_td);
						if (!this->tmpnode) {
							rb_tree_remove_node(&rii->tdi_root, this);
							chfs_kill_tdi(chmp, this);
							this = next_tdi;
						}
					}
					tmp_td = next_td;
				}
				continue;
			}
			if (tmp_td->version > newtd->version &&
			    tmp_td->node->ofs <= newtd->node->ofs &&
			    tmp_td->node->ofs + tmp_td->node->size >= end_ofs) {
				/* New node entirely overlapped by 'this' */
				if (!chfs_check_td_node(chmp, tmp_td)) {
					dbg("this version: %llu\n",
					    (unsigned long long)tmp_td->version);
					dbg("this ofs: %llu, size: %u\n",
					    (unsigned long long)tmp_td->node->ofs,
					    tmp_td->node->size);
					dbg("calling kill td 4\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				}
				/* ... but 'this' was bad. Replace it... */
				chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
				chfs_kill_td(chmp, tmp_td);
				if (!this->tmpnode) {
					rb_tree_remove_node(&rii->tdi_root, this);
					chfs_kill_tdi(chmp, this);
				}
				dbg("calling kill td 5\n");
				chfs_kill_td(chmp, newtd);
				return 0;
			}
			tmp_td = tmp_td->next;
		}
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
	}

	newtdi = chfs_alloc_tmp_dnode_info();
	chfs_add_tmp_dnode_to_tdi(newtdi, newtd);
	/* We neither completely obsoleted nor were completely
	   obsoleted by an earlier node. Insert into the tree */
	struct chfs_tmp_dnode_info *tmp_tdi = rb_tree_insert_node(&rii->tdi_root, newtdi);
	if (tmp_tdi != newtdi) {
		chfs_remove_tmp_dnode_from_tdi(newtdi, newtd);
		chfs_add_tmp_dnode_to_tdi(tmp_tdi, newtd);
		chfs_kill_tdi(chmp, newtdi);
	}

	/* If there's anything behind that overlaps us, note it */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
	if (node) {
		for (;;) {
			this = (struct chfs_tmp_dnode_info *)node;
			if (this->tmpnode->node->ofs + this->tmpnode->node->size > newtd->node->ofs) {
				newtd->overlapped = 1;
			}
			if (!this->tmpnode->overlapped)
				break;

			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			if (!prev_node) {
				this->tmpnode->overlapped = 0;
				break;
			}
			node = prev_node;
		}
	}

	/* If the new node overlaps anything ahead, note it */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
	this = (struct chfs_tmp_dnode_info *)node;
	while (this && this->tmpnode->node->ofs < end_ofs) {
		this->tmpnode->overlapped = 1;
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
		this = (struct chfs_tmp_dnode_info *)node;
	}

	return 0;
}

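/*
 * Summary of the overlap policy above: a node with the same version replaces
 * the old copy only if the old copy fails its check; a newer node that fully
 * covers existing nodes obsoletes them, provided its own data checks out; and
 * a new node fully covered by a newer, valid node is dropped.  Whatever
 * survives is inserted into the temporary tree, and partial overlaps are only
 * flagged through the 'overlapped' bit; they are resolved later by
 * chfs_build_fragtree().
 */
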
/* new_fragment - creates a new fragment for a data node */
struct chfs_node_frag *
new_fragment(struct chfs_full_dnode *fdn, uint32_t ofs, uint32_t size)
{
	struct chfs_node_frag *newfrag;

	newfrag = chfs_alloc_node_frag();
	if (newfrag) {
		/* Initialize fragment. */
		newfrag->ofs = ofs;
		newfrag->size = size;
		newfrag->node = fdn;
		if (newfrag->node) {
			newfrag->node->frags++;
		}
	} else {
		chfs_err("cannot allocate a chfs_node_frag object\n");
	}

	return newfrag;
}

/*
 * no_overlapping_node - inserts a node to the fragtree
 * Puts a hole frag into the holes between fragments.
 */
int
no_overlapping_node(struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag,
    struct chfs_node_frag *this, uint32_t lastend)
{
	if (lastend < newfrag->node->ofs) {
		/* There is a hole in front of the new fragment: pad it. */
		struct chfs_node_frag *holefrag;

		holefrag = new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
		if (!holefrag) {
			chfs_free_node_frag(newfrag);
			return ENOMEM;
		}

		rb_tree_insert_node(fragtree, holefrag);
	}

	rb_tree_insert_node(fragtree, newfrag);

	return 0;
}

/*
 * chfs_add_frag_to_fragtree -
 * adds a fragment to a data node's fragtree
 */
int
chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag)
{
	struct chfs_node_frag *this;
	uint32_t lastend;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	/* Find the offset of the frag which is before the new one. */
	this = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &newfrag->ofs);

	if (this) {
		lastend = this->ofs + this->size;
	} else {
		lastend = 0;
	}

	/* New fragment is at the end of the file and there is no overlapping. */
	if (lastend <= newfrag->ofs) {
		if (lastend && (lastend - 1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
			if (this->node)
				CHFS_MARK_REF_NORMAL(this->node->nref);
			CHFS_MARK_REF_NORMAL(newfrag->node->nref);
		}
		return no_overlapping_node(fragtree, newfrag, this, lastend);
	}

	if (newfrag->ofs > this->ofs) {
		/* The new fragment does not obsolete 'this' completely. */
		CHFS_MARK_REF_NORMAL(newfrag->node->nref);
		if (this->node)
			CHFS_MARK_REF_NORMAL(this->node->nref);

		if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
			/* Newfrag is inside of this. */
			struct chfs_node_frag *newfrag2;

			newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
			    this->ofs + this->size - newfrag->ofs - newfrag->size);
			if (!newfrag2)
				return ENOMEM;

			this->size = newfrag->ofs - this->ofs;

			rb_tree_insert_node(fragtree, newfrag);
			rb_tree_insert_node(fragtree, newfrag2);

			return 0;
		}
		/* Newfrag is bottom of this. */
		this->size = newfrag->ofs - this->ofs;
		rb_tree_insert_node(fragtree, newfrag);
	} else {
		/* Newfrag starts at the same point. */
		/* TODO replace instead of remove and insert */
		rb_tree_remove_node(fragtree, this);
		rb_tree_insert_node(fragtree, newfrag);

		if (newfrag->ofs + newfrag->size >= this->ofs + this->size) {
			chfs_obsolete_node_frag(chmp, this);
		} else {
			this->ofs += newfrag->size;
			this->size -= newfrag->size;

			rb_tree_insert_node(fragtree, this);
			return 0;
		}
	}

	/* OK, now we have newfrag added in the correct place in the tree, but
	   frag_next(newfrag) may be a fragment which is overlapped by it. */
	while ((this = frag_next(fragtree, newfrag)) &&
	    newfrag->ofs + newfrag->size >= this->ofs + this->size) {
		rb_tree_remove_node(fragtree, this);
		chfs_obsolete_node_frag(chmp, this);
	}

	if (!this || newfrag->ofs + newfrag->size == this->ofs)
		return 0;

	/* There is still some overlap: shrink the following fragment. */
	this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
	this->ofs = newfrag->ofs + newfrag->size;

	if (this->node)
		CHFS_MARK_REF_NORMAL(this->node->nref);
	CHFS_MARK_REF_NORMAL(newfrag->node->nref);

	return 0;
}

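/*
 * The cases handled above, with 'this' being the fragment found at or before
 * newfrag->ofs: if 'this' ends before newfrag starts, the gap (if any) is
 * padded with a hole fragment; if newfrag lies strictly inside 'this', 'this'
 * is split in two around it; if newfrag only covers the tail of 'this', 'this'
 * is shortened; and if both start at the same offset, newfrag takes the place
 * of 'this', which is then either obsoleted or trimmed.  Finally, any
 * following fragments that newfrag covers completely are obsoleted by the
 * trailing loop.
 */
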
/*
 * chfs_remove_frags_of_node -
 * removes all fragments from a fragtree and DOESN'T OBSOLETE them
 */
void
chfs_remove_frags_of_node(struct chfs_mount *chmp, struct rb_tree *fragtree,
    struct chfs_node_ref *nref)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	struct chfs_node_frag *this, *next;

	/* Iterate the tree and clean all elements. */
	this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
	while (this) {
		next = frag_next(fragtree, this);
		if (this->node->nref == nref) {
			rb_tree_remove_node(fragtree, this);
			chfs_free_node_frag(this);
		}
		this = next;
	}
}

/*
 * chfs_kill_fragtree -
 * removes all fragments from a fragtree and OBSOLETES them
 */
void
chfs_kill_fragtree(struct chfs_mount *chmp, struct rb_tree *fragtree)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	struct chfs_node_frag *this, *next;

	/* Iterate the tree and clean all elements. */
	this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
	while (this) {
		next = frag_next(fragtree, this);
		rb_tree_remove_node(fragtree, this);
		chfs_obsolete_node_frag(chmp, this);
		this = next;
	}
}

/* chfs_truncate_fragtree - truncates the tree to a specified size */
uint32_t
chfs_truncate_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree, uint32_t size)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	struct chfs_node_frag *frag;

	dbg("truncate to size: %u\n", size);

	frag = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &size);

	/* Find the last frag before size and set its new size. */
	if (frag && frag->ofs != size) {
		if (frag->ofs + frag->size > size) {
			frag->size = size - frag->ofs;
		}
		frag = frag_next(fragtree, frag);
	}

	/* Delete frags after new size. */
	while (frag && frag->ofs >= size) {
		struct chfs_node_frag *next = frag_next(fragtree, frag);

		rb_tree_remove_node(fragtree, frag);
		chfs_obsolete_node_frag(chmp, frag);
		frag = next;
	}

	if (size == 0) {
		return 0;
	}

	frag = frag_last(fragtree);
	if (!frag) {
		return 0;
	}

	if (frag->ofs + frag->size < size) {
		return frag->ofs + frag->size;
	}

	/* FIXME Should we check the position of the last node? (PAGE_CACHE size, etc.) */
	if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
		frag->node->nref->nref_offset =
		    CHFS_GET_OFS(frag->node->nref->nref_offset) | CHFS_PRISTINE_NODE_MASK;
	}

	return size;
}

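/*
 * The value returned by chfs_truncate_fragtree() is the size the fragtree
 * actually ends at: if the last fragment stops short of the requested size,
 * the caller (chfs_read_inode_internal()) receives that smaller value and
 * logs the mismatch.
 */
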
/* chfs_obsolete_node_frag - obsoletes a fragment of a node */
void
chfs_obsolete_node_frag(struct chfs_mount *chmp,
    struct chfs_node_frag *this)
{
	struct chfs_vnode_cache *vc;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (this->node) {
		/* The fragment is in a node. */
		KASSERT(this->node->frags != 0);
		this->node->frags--;
		if (this->node->frags == 0) {
			/* This is the last fragment. (There is no more.) */
			KASSERT(!CHFS_REF_OBSOLETE(this->node->nref));
			mutex_enter(&chmp->chm_lock_vnocache);
			vc = chfs_nref_to_vc(this->node->nref);
			dbg("[MARK] lnr: %u ofs: %u\n", this->node->nref->nref_lnr,
			    this->node->nref->nref_offset);

			chfs_remove_and_obsolete(chmp, vc, this->node->nref, &vc->dnode);
			mutex_exit(&chmp->chm_lock_vnocache);

			chfs_free_full_dnode(this->node);
		} else {
			/* There are more fragments in the node. */
			CHFS_MARK_REF_NORMAL(this->node->nref);
		}
	}

	chfs_free_node_frag(this);
}

/* chfs_add_full_dnode_to_inode - adds a data node to an inode */
int
chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_full_dnode *fd)
{
	int ret;
	struct chfs_node_frag *newfrag;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (unlikely(!fd->size))
		return 0;

	/* Create a new fragment from the data node and add it to the fragtree. */
	newfrag = new_fragment(fd, fd->ofs, fd->size);
	if (unlikely(!newfrag))
		return ENOMEM;

	ret = chfs_add_frag_to_fragtree(chmp, &ip->fragtree, newfrag);
	if (ret)
		return ret;

	/* Check previous fragment. */
	if (newfrag->ofs & (PAGE_SIZE - 1)) {
		struct chfs_node_frag *prev = frag_prev(&ip->fragtree, newfrag);

		CHFS_MARK_REF_NORMAL(fd->nref);
		if (prev->node)
			CHFS_MARK_REF_NORMAL(prev->node->nref);
	}

	/* Check next fragment. */
	if ((newfrag->ofs + newfrag->size) & (PAGE_SIZE - 1)) {
		struct chfs_node_frag *next = frag_next(&ip->fragtree, newfrag);

		if (next) {
			CHFS_MARK_REF_NORMAL(fd->nref);
			if (next->node)
				CHFS_MARK_REF_NORMAL(next->node->nref);
		}
	}

	return 0;
}

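/*
 * The PAGE_SIZE masks above detect when the new fragment shares a page with a
 * neighbouring fragment; in that case both the new data node and the
 * neighbour's node are marked REF_NORMAL rather than being left pristine.
 * Hole fragments have no node, hence the node checks before marking.
 */
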
/* chfs_get_data_nodes - get temporary nodes of an inode */
int
chfs_get_data_nodes(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
{
	int err;
	size_t len, retlen;
	uint32_t crc;
	char *buf;
	struct chfs_node_ref *nref;
	struct chfs_flash_data_node *dnode;
	struct chfs_tmp_dnode *td;

	len = sizeof(struct chfs_flash_data_node);
	buf = kmem_alloc(len, KM_SLEEP);

	dnode = kmem_alloc(len, KM_SLEEP);

	nref = chfs_first_valid_data_ref(ip->chvc->dnode);

	/* Update highest version. */
	rii->highest_version = ip->chvc->highest_version;

	while (nref && (struct chfs_vnode_cache *)nref != ip->chvc) {
		err = chfs_read_leb(chmp, nref->nref_lnr, buf,
		    CHFS_GET_OFS(nref->nref_offset), len, &retlen);
		if (err || len != retlen)
			goto out;
		dnode = (struct chfs_flash_data_node *)buf;

		/* Check header crc. */
		crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
		if (crc != le32toh(dnode->hdr_crc)) {
			chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n",
			    crc, le32toh(dnode->hdr_crc));
			err = EIO;
			goto out;
		}

		/* Check header magic bitmask. */
		if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
			chfs_err("Wrong magic bitmask.\n");
			err = EIO;
			goto out;
		}

		/* Check node crc. */
		crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
		if (crc != le32toh(dnode->node_crc)) {
			chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n",
			    crc, le32toh(dnode->node_crc));
			err = EIO;
			goto out;
		}

		td = chfs_alloc_tmp_dnode();
		if (!td) {
			chfs_err("Can't allocate tmp dnode info.\n");
			err = ENOMEM;
			goto out;
		}

		/* We don't check data crc here, just add nodes to tmp frag tree, because
		 * we don't want to check nodes which have been overlapped by a new node
		 * with a higher version number.
		 */
		td->node = chfs_alloc_full_dnode();
		if (!td->node) {
			chfs_err("Can't allocate full dnode info.\n");
			err = ENOMEM;
			goto out_tmp_dnode;
		}

		td->version = le64toh(dnode->version);
		td->node->ofs = le64toh(dnode->offset);
		td->data_crc = le32toh(dnode->data_crc);
		td->node->nref = nref;
		td->node->size = le32toh(dnode->data_length);

		if (td->version > rii->highest_version) {
			rii->highest_version = td->version;
		}

		/* Add node to the tree. */
		err = chfs_add_tmp_dnode_to_tree(chmp, rii, td);
		if (err)
			goto out_full_dnode;

		nref = chfs_first_valid_data_ref(nref->nref_next);
	}

	ip->chvc->highest_version = rii->highest_version;

	err = 0;
	goto out;

out_full_dnode:
	chfs_free_full_dnode(td->node);
out_tmp_dnode:
	chfs_free_tmp_dnode(td);
out:
	kmem_free(dnode, len);
	return err;
}

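/*
 * The walk above terminates when the nref chain links back to the vnode cache
 * itself, which is how per-vnode nref lists are closed.  Data CRCs are
 * deliberately not verified here (see the comment inside the loop); they are
 * checked later via chfs_check_td_node() when the fragtree is built.
 */
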
/* chfs_build_fragtree - builds fragtree from temporary tree */
int
chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
{
	int ret;
	struct chfs_tmp_dnode_info *pen, *last, *this;
	struct rb_tree ver_tree;	/* version tree, used only temporarily */
	uint64_t high_ver = 0;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	rb_tree_init(&ver_tree, &tmp_node_rbtree_ops);

	/* Update highest version and latest node reference. */
	if (rii->mdata_tn) {
		high_ver = rii->mdata_tn->tmpnode->version;
		rii->latest_ref = rii->mdata_tn->tmpnode->node->nref;
	}

	/* Iterate the temporary tree in reverse order. */
	pen = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&rii->tdi_root);

	while ((last = pen)) {
		pen = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&rii->tdi_root, last, RB_DIR_LEFT);

		/* We build here a version tree from overlapped nodes. */
		rb_tree_remove_node(&rii->tdi_root, last);
		rb_tree_insert_node(&ver_tree, last);

		if (last->tmpnode->overlapped) {
			if (pen)
				continue;
			/* The leftmost node cannot be overlapped by a previous one. */
			last->tmpnode->overlapped = 0;
		}

		this = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&ver_tree);

		/* Start to build the fragtree. */
		while (this) {
			struct chfs_tmp_dnode_info *vers_next;

			vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
			rb_tree_remove_node(&ver_tree, this);

			struct chfs_tmp_dnode *tmp_td = this->tmpnode;
			while (tmp_td) {
				struct chfs_tmp_dnode *next_td = tmp_td->next;

				/* Check temporary node. */
				if (chfs_check_td_node(chmp, tmp_td)) {
					chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
					chfs_kill_td(chmp, tmp_td);
					tmp_td = next_td;
					continue;
				}

				if (tmp_td->version > high_ver) {
					high_ver = tmp_td->version;
					dbg("highver: %llu\n", (unsigned long long)high_ver);
					rii->latest_ref = tmp_td->node->nref;
				}

				/* Add node to inode and its fragtree. */
				ret = chfs_add_full_dnode_to_inode(chmp, ip, tmp_td->node);
				if (ret) {
					/* On error, clean the whole version tree. */
					vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);

					while (tmp_td) {
						next_td = tmp_td->next;
						chfs_free_full_dnode(tmp_td->node);
						chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
						chfs_kill_td(chmp, tmp_td);
						tmp_td = next_td;
					}
					chfs_free_tmp_dnode_info(this);

					while (vers_next) {
						this = vers_next;
						vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
						rb_tree_remove_node(&ver_tree, this);
						chfs_kill_tdi(chmp, this);
					}
					return ret;
				}

				/* Remove temporary node from temporary descriptor.
				 * Shouldn't obsolete tmp_td here, because tmp_td->node
				 * was added to the inode. */
				chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
				chfs_free_tmp_dnode(tmp_td);

				tmp_td = next_td;
			}

			/* Continue with the previous element of version tree. */
			chfs_kill_tdi(chmp, this);
			this = vers_next;
		}
	}

	return 0;
}

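/*
 * After chfs_build_fragtree() returns successfully, rii->latest_ref points at
 * the checked data node with the highest version and both temporary trees have
 * been torn down; only the inode's fragtree remains.
 */
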
/* chfs_read_inode - checks the state of the inode then reads and builds it */
int chfs_read_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
{
	struct chfs_vnode_cache *vc = ip->chvc;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_vnocache);
	switch (vc->state) {
	case VNO_STATE_UNCHECKED:
		/* FALLTHROUGH */
	case VNO_STATE_CHECKEDABSENT:
		vc->state = VNO_STATE_READING;
		break;
	case VNO_STATE_CHECKING:
		/* Someone else is checking this vnode cache entry: retry. */
		mutex_exit(&chmp->chm_lock_vnocache);
		return chfs_read_inode(chmp, ip);
	case VNO_STATE_PRESENT:
		/* FALLTHROUGH */
	case VNO_STATE_READING:
		chfs_err("Reading inode #%llu in state %d!\n",
		    (unsigned long long)vc->vno, vc->state);
		chfs_err("wants to read a nonexistent ino %llu\n",
		    (unsigned long long)vc->vno);
		mutex_exit(&chmp->chm_lock_vnocache);
		return ENOENT;
	default:
		panic("BUG() Bad vno cache state.");
	}
	mutex_exit(&chmp->chm_lock_vnocache);

	return chfs_read_inode_internal(chmp, ip);
}

/*
 * chfs_read_inode_internal - reads and builds an inode
 * First gets the temporary nodes, then builds the fragtree.
 */
int
chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
{
	int err;
	size_t len, retlen;
	char *buf;
	struct chfs_readinode_info rii;
	struct chfs_flash_vnode *fvnode;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	len = sizeof(*fvnode);

	memset(&rii, 0, sizeof(rii));

	rb_tree_init(&rii.tdi_root, &tmp_node_rbtree_ops);

	/* Build a temporary node tree. */
	err = chfs_get_data_nodes(chmp, ip, &rii);
	if (err) {
		if (ip->chvc->state == VNO_STATE_READING)
			ip->chvc->state = VNO_STATE_CHECKEDABSENT;
		/* FIXME Should we kill fragtree or something here? */
		return err;
	}

	/* Build fragtree from temp nodes. */
	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);

	err = chfs_build_fragtree(chmp, ip, &rii);
	if (err) {
		if (ip->chvc->state == VNO_STATE_READING)
			ip->chvc->state = VNO_STATE_CHECKEDABSENT;
		/* FIXME Should we kill fragtree or something here? */
		return err;
	}

	if (!rii.latest_ref) {
		return 0;
	}

	buf = kmem_alloc(len, KM_SLEEP);

	/* Set inode size from its vnode information node. */
	err = chfs_read_leb(chmp, ip->chvc->v->nref_lnr, buf,
	    CHFS_GET_OFS(ip->chvc->v->nref_offset), len, &retlen);
	if (err || retlen != len) {
		kmem_free(buf, len);
		return EIO;
	}

	fvnode = (struct chfs_flash_vnode *)buf;

	dbg("set size from v: %u\n", fvnode->dn_size);
	chfs_set_vnode_size(ITOV(ip), fvnode->dn_size);
	uint32_t retsize = chfs_truncate_fragtree(chmp, &ip->fragtree, fvnode->dn_size);
	if (retsize != fvnode->dn_size) {
		dbg("Truncating failed. It is %u instead of %u\n",
		    retsize, fvnode->dn_size);
	}

	kmem_free(buf, len);

	if (ip->chvc->state == VNO_STATE_READING) {
		ip->chvc->state = VNO_STATE_PRESENT;
	}

	return 0;
}

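/*
 * Vnode cache states during read-in: chfs_read_inode() moves the entry to
 * VNO_STATE_READING, and chfs_read_inode_internal() leaves it at
 * VNO_STATE_PRESENT on success or back at VNO_STATE_CHECKEDABSENT if
 * collecting the data nodes or building the fragtree failed.  Both functions
 * expect chm_lock_mountfields to be held by the caller (see the KASSERTs).
 */
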
/* chfs_read_data - reads and checks data of a file */
int
chfs_read_data(struct chfs_mount *chmp, struct vnode *vp,
    struct buf *bp)
{
	int err;
	uint32_t crc;
	uint64_t ofs;
	char *buf;
	size_t size, retlen;
	struct chfs_node_frag *frag;
	struct chfs_inode *ip = VTOI(vp);
	struct chfs_flash_data_node *dnode;
	struct chfs_node_ref *nref;

	memset(bp->b_data, 0, bp->b_bcount);

	/* Calculate the size of the file from its fragtree. */
	ofs = bp->b_blkno * PAGE_SIZE;
	frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->fragtree, &ofs);

	if (!frag || frag->ofs > ofs || frag->ofs + frag->size <= ofs) {
		dbg("not found in frag tree\n");
		return 0;
	}

	if (!frag->node) {
		dbg("no node in frag\n");
		return 0;
	}

	nref = frag->node->nref;
	size = sizeof(*dnode) + frag->size;

	buf = kmem_alloc(size, KM_SLEEP);

	/* Read node from flash. */
	dbg("reading from lnr: %u, offset: %u, size: %zu\n",
	    nref->nref_lnr, CHFS_GET_OFS(nref->nref_offset), size);
	err = chfs_read_leb(chmp, nref->nref_lnr, buf,
	    CHFS_GET_OFS(nref->nref_offset), size, &retlen);
	if (err) {
		chfs_err("error after reading: %d\n", err);
		goto out;
	}
	if (retlen != size) {
		chfs_err("retlen: %zu != size: %zu\n", retlen, size);
		err = EIO;
		goto out;
	}

	/* Read data from flash. */
	dnode = (struct chfs_flash_data_node *)buf;
	crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
	if (crc != le32toh(dnode->hdr_crc)) {
		chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n",
		    crc, le32toh(dnode->hdr_crc));
		err = EIO;
		goto out;
	}

	/* Check header magic bitmask. */
	if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
		chfs_err("Wrong magic bitmask.\n");
		err = EIO;
		goto out;
	}

	/* Check crc of node. */
	crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
	if (crc != le32toh(dnode->node_crc)) {
		chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n",
		    crc, le32toh(dnode->node_crc));
		err = EIO;
		goto out;
	}

	/* Check crc of data. */
	crc = crc32(0, (uint8_t *)dnode->data, dnode->data_length);
	if (crc != le32toh(dnode->data_crc)) {
		chfs_err("Data CRC check failed. calc: 0x%x orig: 0x%x\n",
		    crc, le32toh(dnode->data_crc));
		err = EIO;
		goto out;
	}

	memcpy(bp->b_data, dnode->data, dnode->data_length);
	err = 0;

out:
	kmem_free(buf, size);
	return err;
}