/*	$NetBSD: chfs_readinode.c,v 1.2 2011/11/24 21:09:37 agc Exp $	*/
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Created on: 2010.05.31.
/* tmp node operations */
int chfs_check_td_data(struct chfs_mount *,
    struct chfs_tmp_dnode *);
int chfs_check_td_node(struct chfs_mount *,
    struct chfs_tmp_dnode *);
struct chfs_node_ref *chfs_first_valid_data_ref(struct chfs_node_ref *);
int chfs_add_tmp_dnode_to_tree(struct chfs_mount *,
    struct chfs_readinode_info *,
    struct chfs_tmp_dnode *);
void chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
void chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
static void chfs_kill_td(struct chfs_mount *,
    struct chfs_tmp_dnode *);
static void chfs_kill_tdi(struct chfs_mount *,
    struct chfs_tmp_dnode_info *);

/* frag node operations */
struct chfs_node_frag *new_fragment(struct chfs_full_dnode *,
    uint32_t, uint32_t);
int no_overlapping_node(struct rb_tree *, struct chfs_node_frag *,
    struct chfs_node_frag *, uint32_t);
int chfs_add_frag_to_fragtree(struct chfs_mount *,
    struct rb_tree *,
    struct chfs_node_frag *);
void chfs_obsolete_node_frag(struct chfs_mount *,
    struct chfs_node_frag *);

/* general node operations */
int chfs_get_data_nodes(struct chfs_mount *,
    struct chfs_inode *,
    struct chfs_readinode_info *);
int chfs_build_fragtree(struct chfs_mount *,
    struct chfs_inode *,
    struct chfs_readinode_info *);
 * --------------------------
 * tmp node rbtree operations
 * --------------------------

tmp_node_compare_nodes(void *ctx, const void *n1, const void *n2)
	const struct chfs_tmp_dnode_info *tdi1 = n1;
	const struct chfs_tmp_dnode_info *tdi2 = n2;

	return (tdi1->tmpnode->node->ofs - tdi2->tmpnode->node->ofs);

tmp_node_compare_key(void *ctx, const void *n, const void *key)
	const struct chfs_tmp_dnode_info *tdi = n;
	uint64_t ofs = *(const uint64_t *)key;

	return (tdi->tmpnode->node->ofs - ofs);

const rb_tree_ops_t tmp_node_rbtree_ops = {
	.rbto_compare_nodes = tmp_node_compare_nodes,
	.rbto_compare_key = tmp_node_compare_key,
	.rbto_node_offset = offsetof(struct chfs_tmp_dnode_info, rb_node),
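/*
 * Illustrative usage sketch: the tmp dnode tree is keyed by the data node's
 * offset, so it is initialized and queried roughly like this (see
 * chfs_read_inode_internal() and chfs_add_tmp_dnode_to_tree() below):
 *
 *	struct chfs_readinode_info rii;
 *	rb_tree_init(&rii.tdi_root, &tmp_node_rbtree_ops);
 *	...
 *	struct rb_node *node =
 *	    rb_tree_find_node(&rii.tdi_root, &newtd->node->ofs);
 */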
 * ---------------------------
 * frag node rbtree operations
 * ---------------------------

frag_compare_nodes(void *ctx, const void *n1, const void *n2)
	const struct chfs_node_frag *frag1 = n1;
	const struct chfs_node_frag *frag2 = n2;

	return (frag1->ofs - frag2->ofs);

frag_compare_key(void *ctx, const void *n, const void *key)
	const struct chfs_node_frag *frag = n;
	uint64_t ofs = *(const uint64_t *)key;

	return (frag->ofs - ofs);

const rb_tree_ops_t frag_rbtree_ops = {
	.rbto_compare_nodes = frag_compare_nodes,
	.rbto_compare_key = frag_compare_key,
	.rbto_node_offset = offsetof(struct chfs_node_frag, rb_node),
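/*
 * Illustrative note: the per-inode fragtree follows the same pattern, keyed
 * by frag->ofs. A lookup for the fragment covering a file offset uses the
 * "less than or equal" variant, as in chfs_read_data() below:
 *
 *	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);
 *	...
 *	frag = (struct chfs_node_frag *)
 *	    rb_tree_find_node_leq(&ip->fragtree, &ofs);
 */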
 * -------------------
 * tmp node operations
 * -------------------

 * Check the data CRC of the node.
 *
 * Returns: 0 - if everything OK;
 *	    1 - if CRC is incorrect;
 *	    error code if an error occurred.

chfs_check_td_data(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
	size_t retlen, len, totlen;
	struct chfs_node_ref *nref = td->node->nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	ofs = CHFS_GET_OFS(nref->nref_offset) + sizeof(struct chfs_flash_data_node);
	len = td->node->size;

	buf = kmem_alloc(len, KM_SLEEP);
		dbg("allocation error\n");
	err = chfs_read_leb(chmp, nref->nref_lnr, buf, ofs, len, &retlen);
		dbg("error while reading: %d\n", err);
		dbg("len:%zu, retlen:%zu\n", len, retlen);

	crc = crc32(0, (uint8_t *)buf, len);
	if (crc != td->data_crc) {
		dbg("crc failed, calculated: 0x%x, orig: 0x%x\n", crc, td->data_crc);

	nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) | CHFS_NORMAL_NODE_MASK;
	totlen = CHFS_PAD(sizeof(struct chfs_flash_data_node) + len);

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_unchecked(chmp, &chmp->chm_blocks[nref->nref_lnr], -totlen);
	chfs_change_size_used(chmp, &chmp->chm_blocks[nref->nref_lnr], totlen);
	mutex_exit(&chmp->chm_lock_sizes);
	KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);
chfs_check_td_node(struct chfs_mount *chmp, struct chfs_tmp_dnode *td)
	if (CHFS_REF_FLAGS(td->node->nref) != CHFS_UNCHECKED_NODE_MASK)

	ret = chfs_check_td_data(chmp, td);
		chfs_mark_node_obsolete(chmp, td->node->nref);
struct chfs_node_ref *
chfs_first_valid_data_ref(struct chfs_node_ref *nref)
		if (!CHFS_REF_OBSOLETE(nref)) {
			if (nref->nref_lnr == REF_EMPTY_NODE) {
				dbg("FIRST VALID IS EMPTY!\n");
		if (nref->nref_next) {
			nref = nref->nref_next;
chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
		struct chfs_tmp_dnode *tmp = tdi->tmpnode;
chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
	if (tdi->tmpnode == td) {
		tdi->tmpnode = tdi->tmpnode->next;
		struct chfs_tmp_dnode *tmp = tdi->tmpnode->next;
		while (tmp->next && tmp->next != td) {
			tmp->next = td->next;
chfs_kill_td(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
	/* check if we still need to mark the node obsolete, to avoid marking it twice */
	if (!CHFS_REF_OBSOLETE(td->node->nref)) {
		chfs_mark_node_obsolete(chmp, td->node->nref);

	chfs_free_tmp_dnode(td);
chfs_kill_tdi(struct chfs_mount *chmp,
    struct chfs_tmp_dnode_info *tdi)
	struct chfs_tmp_dnode *next, *tmp = tdi->tmpnode;

		chfs_kill_td(chmp, tmp);

	chfs_free_tmp_dnode_info(tdi);
chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp,
    struct chfs_readinode_info *rii,
    struct chfs_tmp_dnode *newtd)
	uint64_t end_ofs = newtd->node->ofs + newtd->node->size;
	struct chfs_tmp_dnode_info *this;
	struct rb_node *node, *prev_node;
	struct chfs_tmp_dnode_info *newtdi;

	node = rb_tree_find_node(&rii->tdi_root, &newtd->node->ofs);
		this = (struct chfs_tmp_dnode_info *)node;
		while (this->tmpnode->overlapped) {
			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
				this->tmpnode->overlapped = 0;
			this = (struct chfs_tmp_dnode_info *)node;
		this = (struct chfs_tmp_dnode_info *)node;
		if (this->tmpnode->node->ofs > end_ofs)

		struct chfs_tmp_dnode *tmp_td = this->tmpnode;
			if (tmp_td->version == newtd->version) {
				if (!chfs_check_td_node(chmp, tmp_td)) {
					dbg("calling kill td 0\n");
					chfs_kill_td(chmp, newtd);
				chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
				chfs_kill_td(chmp, tmp_td);
				chfs_add_tmp_dnode_to_tdi(this, newtd);

			if (tmp_td->version < newtd->version &&
			    tmp_td->node->ofs >= newtd->node->ofs &&
			    tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
				/* New node entirely overlaps 'this' */
				if (chfs_check_td_node(chmp, newtd)) {
					dbg("calling kill td 2\n");
					chfs_kill_td(chmp, newtd);
				/* ... and is good. Kill 'this' and any subsequent
				   nodes which are also overlapped. */
				while (tmp_td && tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
					struct rb_node *next = rb_tree_iterate(&rii->tdi_root, this, RB_DIR_RIGHT);
					struct chfs_tmp_dnode_info *next_tdi = (struct chfs_tmp_dnode_info *)next;
					struct chfs_tmp_dnode *next_td = NULL;
						next_td = tmp_td->next;
					} else if (next_tdi) {
						next_td = next_tdi->tmpnode;
					if (tmp_td->version < newtd->version) {
						chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
						chfs_kill_td(chmp, tmp_td);
						if (!this->tmpnode) {
							rb_tree_remove_node(&rii->tdi_root, this);
							chfs_kill_tdi(chmp, this);

			if (tmp_td->version > newtd->version &&
			    tmp_td->node->ofs <= newtd->node->ofs &&
			    tmp_td->node->ofs + tmp_td->node->size >= end_ofs) {
				/* New node entirely overlapped by 'this' */
				if (!chfs_check_td_node(chmp, tmp_td)) {
					dbg("this version: %llu\n",
					    (unsigned long long)tmp_td->version);
					dbg("this ofs: %llu, size: %u\n",
					    (unsigned long long)tmp_td->node->ofs,
					    tmp_td->node->size);
					dbg("calling kill td 4\n");
					chfs_kill_td(chmp, newtd);
				/* ... but 'this' was bad. Replace it... */
				chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
				chfs_kill_td(chmp, tmp_td);
				if (!this->tmpnode) {
					rb_tree_remove_node(&rii->tdi_root, this);
					chfs_kill_tdi(chmp, this);
				dbg("calling kill td 5\n");
				chfs_kill_td(chmp, newtd);

			tmp_td = tmp_td->next;
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);

	newtdi = chfs_alloc_tmp_dnode_info();
	chfs_add_tmp_dnode_to_tdi(newtdi, newtd);
	/* We neither completely obsoleted nor were completely
	   obsoleted by an earlier node. Insert into the tree. */
	struct chfs_tmp_dnode_info *tmp_tdi = rb_tree_insert_node(&rii->tdi_root, newtdi);
	if (tmp_tdi != newtdi) {
		chfs_add_tmp_dnode_to_tdi(tmp_tdi, newtd);
		newtdi->tmpnode = NULL;
		chfs_kill_tdi(chmp, newtdi);

	/* If there's anything behind that overlaps us, note it */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			this = (struct chfs_tmp_dnode_info *)node;
			if (this->tmpnode->node->ofs + this->tmpnode->node->size > newtd->node->ofs) {
				newtd->overlapped = 1;
			if (!this->tmpnode->overlapped)

			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			this->tmpnode->overlapped = 0;

	/* If the new node overlaps anything ahead, note it */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
	this = (struct chfs_tmp_dnode_info *)node;
	while (this && this->tmpnode->node->ofs < end_ofs) {
		this->tmpnode->overlapped = 1;
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
		this = (struct chfs_tmp_dnode_info *)node;
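/*
 * Illustrative summary of the version checks above, with hypothetical
 * offsets and versions: suppose the tree already holds a node covering
 * ofs 0..4096 with version 2. If a new node arrives for the same range
 * with version 2 as well, the newcomer is killed as a duplicate when the
 * existing copy passes chfs_check_td_node(); if the existing copy is bad,
 * the newcomer replaces it in the same tdi. If the new node has version 5
 * and fully covers the old range, the old node (and any later nodes it
 * also covers with a lower version) is killed, provided the new node's
 * own check succeeds. Conversely, a new node that is fully covered by a
 * good node with a higher version is killed immediately.
 */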
 * --------------------
 * frag node operations
 * --------------------

struct chfs_node_frag *
new_fragment(struct chfs_full_dnode *fdn, uint32_t ofs, uint32_t size)
	struct chfs_node_frag *newfrag;

	newfrag = chfs_alloc_node_frag();
		newfrag->size = size;
		chfs_err("cannot allocate a chfs_node_frag object\n");

no_overlapping_node(struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag,
    struct chfs_node_frag *this, uint32_t lastend)
	if (lastend < newfrag->node->ofs) {
		struct chfs_node_frag *holefrag;

		holefrag = new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
			chfs_free_node_frag(newfrag);
		rb_tree_insert_node(fragtree, holefrag);

	rb_tree_insert_node(fragtree, newfrag);
chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag)
	struct chfs_node_frag *this;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	this = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &newfrag->ofs);
		lastend = this->ofs + this->size;

	if (lastend <= newfrag->ofs) {
		//dbg("no overlapping node\n");
		if (lastend && (lastend - 1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
			CHFS_MARK_REF_NORMAL(this->node->nref);
			CHFS_MARK_REF_NORMAL(newfrag->node->nref);
		return no_overlapping_node(fragtree, newfrag, this, lastend);

	if (newfrag->ofs > this->ofs) {
		CHFS_MARK_REF_NORMAL(newfrag->node->nref);
			CHFS_MARK_REF_NORMAL(this->node->nref);
		if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
			/* newfrag is inside of this */
			//dbg("newfrag is inside of this\n");
			struct chfs_node_frag *newfrag2;

			newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
			    this->ofs + this->size - newfrag->ofs - newfrag->size);
			this->size = newfrag->ofs - this->ofs;
			rb_tree_insert_node(fragtree, newfrag);
			rb_tree_insert_node(fragtree, newfrag2);
		/* newfrag is the bottom of this */
		//dbg("newfrag is bottom of this\n");
		this->size = newfrag->ofs - this->ofs;
		rb_tree_insert_node(fragtree, newfrag);
		/* newfrag starts at the same point */
		//dbg("newfrag start at same point\n");
		//TODO replace instead of remove and insert
		rb_tree_remove_node(fragtree, this);
		rb_tree_insert_node(fragtree, newfrag);

		if (newfrag->ofs + newfrag->size >= this->ofs + this->size) {
			chfs_obsolete_node_frag(chmp, this);
			this->ofs += newfrag->size;
			this->size -= newfrag->size;

			rb_tree_insert_node(fragtree, this);

	/* OK, now we have newfrag added in the correct place in the tree, but
	   frag_next(newfrag) may be a fragment which is overlapped by it */
	while ((this = frag_next(fragtree, newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
		rb_tree_remove_node(fragtree, this);
		chfs_obsolete_node_frag(chmp, this);

	if (!this || newfrag->ofs + newfrag->size == this->ofs)

	this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
	this->ofs = newfrag->ofs + newfrag->size;

	CHFS_MARK_REF_NORMAL(this->node->nref);
	CHFS_MARK_REF_NORMAL(newfrag->node->nref);
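/*
 * Rough sketch of the overlap cases handled above, where 'this' is the
 * fragment found at or before newfrag->ofs (offsets grow to the right):
 *
 *	this:     |--------------------|
 *	case 1:        |newfrag|            newfrag is inside of this:
 *	                                    'this' is trimmed and a tail
 *	                                    fragment (newfrag2) is created.
 *	case 2:             |--newfrag--|   newfrag covers the tail ("bottom")
 *	                                    of this: 'this' is shortened.
 *	case 3:   |--newfrag--|             same starting offset: 'this' is
 *	                                    obsoleted or trimmed from the
 *	                                    front and reinserted.
 */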
chfs_kill_fragtree(struct rb_tree *fragtree)
	struct chfs_node_frag *this, *next;

	this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
	//for (this = (struct chfs_node_frag *)RB_TREE_MIN(&fragtree); this != NULL; this = (struct chfs_node_frag *)rb_tree_iterate(&fragtree, &this->rb_node, RB_DIR_RIGHT)) {
		next = frag_next(fragtree, this);
		rb_tree_remove_node(fragtree, this);
		chfs_free_node_frag(this);
		//dbg("one frag killed\n");
chfs_truncate_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree, uint32_t size)
	struct chfs_node_frag *frag;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	dbg("truncate to size: %u\n", size);

	frag = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &size);

	/* Find the last frag before size and set its new size. */
	if (frag && frag->ofs != size) {
		if (frag->ofs + frag->size > size) {
			frag->size = size - frag->ofs;
		frag = frag_next(fragtree, frag);

	/* Delete frags after new size. */
	while (frag && frag->ofs >= size) {
		struct chfs_node_frag *next = frag_next(fragtree, frag);

		rb_tree_remove_node(fragtree, frag);
		chfs_obsolete_node_frag(chmp, frag);

	frag = frag_last(fragtree);

	if (frag->ofs + frag->size < size) {
		return frag->ofs + frag->size;

	/* FIXME Should we check the position of the last node? (PAGE_CACHE size, etc.) */
	if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
		frag->node->nref->nref_offset =
		    CHFS_GET_OFS(frag->node->nref->nref_offset) | CHFS_PRISTINE_NODE_MASK;
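/*
 * Illustrative use of chfs_truncate_fragtree(): the read path trims the
 * fragtree to the size recorded in the flash vnode, roughly as in
 * chfs_read_inode_internal() below:
 *
 *	uint32_t retsize = chfs_truncate_fragtree(chmp, &ip->fragtree,
 *	    fvnode->dn_size);
 *
 * The caller compares the returned size against the requested one and
 * only logs a mismatch with dbg().
 */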
chfs_obsolete_node_frag(struct chfs_mount *chmp,
    struct chfs_node_frag *this)
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
		if (!this->node->frags) {
			struct chfs_vnode_cache *vc = chfs_nref_to_vc(this->node->nref);

			chfs_mark_node_obsolete(chmp, this->node->nref);

			if (vc->dnode == this->node->nref) {
				vc->dnode = this->node->nref->nref_next;
				struct chfs_node_ref *tmp = vc->dnode;
				while (tmp->nref_next != (struct chfs_node_ref *)vc
				    && tmp->nref_next != this->node->nref) {
					tmp = tmp->nref_next;
				if (tmp->nref_next == this->node->nref) {
					tmp->nref_next = this->node->nref->nref_next;
				// FIXME should we free this->node->nref here?

			chfs_free_full_dnode(this->node);
			CHFS_MARK_REF_NORMAL(this->node->nref);

	chfs_free_node_frag(this);
chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_full_dnode *fd)
	struct chfs_node_frag *newfrag;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (unlikely(!fd->size))

	newfrag = new_fragment(fd, fd->ofs, fd->size);
	if (unlikely(!newfrag))

	newfrag->node->frags = 1;

	ret = chfs_add_frag_to_fragtree(chmp, &ip->fragtree, newfrag);

	if (newfrag->ofs & (PAGE_SIZE - 1)) {
		struct chfs_node_frag *prev = frag_prev(&ip->fragtree, newfrag);

		CHFS_MARK_REF_NORMAL(fd->nref);
			CHFS_MARK_REF_NORMAL(prev->node->nref);

	if ((newfrag->ofs + newfrag->size) & (PAGE_SIZE - 1)) {
		struct chfs_node_frag *next = frag_next(&ip->fragtree, newfrag);

		CHFS_MARK_REF_NORMAL(fd->nref);
			CHFS_MARK_REF_NORMAL(next->node->nref);
 * -----------------------
 * general node operations
 * -----------------------

/* Get the temporary data nodes of an inode. */
chfs_get_data_nodes(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
	struct chfs_node_ref *nref;
	struct chfs_flash_data_node *dnode;
	struct chfs_tmp_dnode *td;

	len = sizeof(struct chfs_flash_data_node);
	buf = kmem_alloc(len, KM_SLEEP);

	dnode = kmem_alloc(len, KM_SLEEP);

	nref = chfs_first_valid_data_ref(ip->chvc->dnode);

	rii->highest_version = ip->chvc->highest_version;

	while (nref && (struct chfs_vnode_cache *)nref != ip->chvc) {
		err = chfs_read_leb(chmp, nref->nref_lnr, buf, CHFS_GET_OFS(nref->nref_offset), len, &retlen);
		if (err || len != retlen)

		dnode = (struct chfs_flash_data_node *)buf;

		// check the header CRC
		crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
		if (crc != le32toh(dnode->hdr_crc)) {
			chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->hdr_crc));

		// check the header magic bitmask
		if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
			chfs_err("Wrong magic bitmask.\n");

		// check the node CRC
		crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
		if (crc != le32toh(dnode->node_crc)) {
			chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));

		td = chfs_alloc_tmp_dnode();
			chfs_err("Can't allocate tmp dnode info.\n");

		/* We don't check the data CRC here, just add the nodes to the tmp
		 * frag tree, because we don't want to check nodes that have been
		 * overlapped by a newer node with a higher version number.
		 */
		td->node = chfs_alloc_full_dnode();
			chfs_err("Can't allocate full dnode info.\n");

		td->version = le64toh(dnode->version);
		td->node->ofs = le64toh(dnode->offset);
		td->data_crc = le32toh(dnode->data_crc);
		td->node->nref = nref;
		td->node->size = le32toh(dnode->data_length);

		if (td->version > rii->highest_version) {
			rii->highest_version = td->version;

		err = chfs_add_tmp_dnode_to_tree(chmp, rii, td);

		nref = chfs_first_valid_data_ref(nref->nref_next);

	ip->chvc->highest_version = rii->highest_version;

	chfs_free_full_dnode(td->node);
	chfs_free_tmp_dnode(td);
	kmem_free(dnode, len);
/* Build the final, normal fragtree from the tdi tree. */
chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
	struct chfs_tmp_dnode_info *pen, *last, *this;
	struct rb_tree ver_tree;	/* version tree */
	uint64_t high_ver = 0;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	rb_tree_init(&ver_tree, &tmp_node_rbtree_ops);

		high_ver = rii->mdata_tn->tmpnode->version;
		rii->latest_ref = rii->mdata_tn->tmpnode->node->nref;

	pen = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&rii->tdi_root);

	while ((last = pen)) {
		pen = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&rii->tdi_root, last, RB_DIR_LEFT);

		rb_tree_remove_node(&rii->tdi_root, last);
		rb_tree_insert_node(&ver_tree, last);

		if (last->tmpnode->overlapped) {
			last->tmpnode->overlapped = 0;
		this = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&ver_tree);

		struct chfs_tmp_dnode_info *vers_next;

			vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
			rb_tree_remove_node(&ver_tree, this);

			struct chfs_tmp_dnode *tmp_td = this->tmpnode;
				struct chfs_tmp_dnode *next_td = tmp_td->next;

				if (chfs_check_td_node(chmp, tmp_td)) {
					chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
					if (tmp_td->version > high_ver) {
						high_ver = tmp_td->version;
						dbg("highver: %llu\n", (unsigned long long)high_ver);
						rii->latest_ref = tmp_td->node->nref;

					ret = chfs_add_full_dnode_to_inode(chmp, ip, tmp_td->node);
						vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
						next_td = tmp_td->next;
						if (chfs_check_td_node(chmp, tmp_td) > 1) {
							chfs_mark_node_obsolete(chmp,
							    tmp_td->node->nref);
						chfs_free_full_dnode(tmp_td->node);
						chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
						chfs_free_tmp_dnode(tmp_td);
						chfs_free_tmp_dnode_info(this);

						rb_tree_remove_node(&ver_tree, vers_next);

					chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
					chfs_free_tmp_dnode(tmp_td);
			chfs_kill_tdi(chmp, this);
int chfs_read_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
	struct chfs_vnode_cache *vc = ip->chvc;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	//mutex_enter(&chmp->chm_lock_vnocache);
	case VNO_STATE_UNCHECKED:
	case VNO_STATE_CHECKEDABSENT:
//		chfs_vnode_cache_set_state(chmp, vc, VNO_STATE_READING);
		vc->state = VNO_STATE_READING;
	case VNO_STATE_CHECKING:
		//sleep_on_spinunlock(&chmp->chm_lock_vnocache);
		//KASSERT(!mutex_owned(&chmp->chm_lock_vnocache));
	case VNO_STATE_PRESENT:
	case VNO_STATE_READING:
		chfs_err("Reading inode #%llu in state %d!\n",
		    (unsigned long long)vc->vno, vc->state);
		chfs_err("wants to read a nonexistent ino %llu\n",
		    (unsigned long long)vc->vno);
		panic("BUG() Bad vno cache state.");
	//mutex_exit(&chmp->chm_lock_vnocache);
	return chfs_read_inode_internal(chmp, ip);
/*
 * First get the tmp nodes, then build the fragtree from them.
 */
chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
	struct chfs_readinode_info rii;
	struct chfs_flash_vnode *fvnode;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	len = sizeof(*fvnode);

	memset(&rii, 0, sizeof(rii));

	rb_tree_init(&rii.tdi_root, &tmp_node_rbtree_ops);

	/* build up a temp node frag tree */
	err = chfs_get_data_nodes(chmp, ip, &rii);
		if (ip->chvc->state == VNO_STATE_READING)
			ip->chvc->state = VNO_STATE_CHECKEDABSENT;
		/* FIXME Should we kill the fragtree or something here? */

	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);
	/* build the fragtree from the temp nodes */
	err = chfs_build_fragtree(chmp, ip, &rii);
		if (ip->chvc->state == VNO_STATE_READING)
			ip->chvc->state = VNO_STATE_CHECKEDABSENT;
		/* FIXME Should we kill the fragtree or something here? */

	if (!rii.latest_ref) {

	buf = kmem_alloc(len, KM_SLEEP);

	/* set the inode size from chvc->v */
	err = chfs_read_leb(chmp, ip->chvc->v->nref_lnr, buf, CHFS_GET_OFS(ip->chvc->v->nref_offset), len, &retlen);
	if (err || retlen != len) {
		kmem_free(buf, len);

	fvnode = (struct chfs_flash_vnode *)buf;

	dbg("set size from v: %u\n", fvnode->dn_size);
	chfs_set_vnode_size(ITOV(ip), fvnode->dn_size);
	uint32_t retsize = chfs_truncate_fragtree(chmp, &ip->fragtree, fvnode->dn_size);
	if (retsize != fvnode->dn_size) {
		dbg("Truncating failed. It is %u instead of %u\n", retsize, fvnode->dn_size);

	kmem_free(buf, len);

	if (ip->chvc->state == VNO_STATE_READING) {
		ip->chvc->state = VNO_STATE_PRESENT;
chfs_read_data(struct chfs_mount *chmp, struct vnode *vp,
    struct buf *bp)
	struct chfs_node_frag *frag;
	size_t size, retlen;
	struct chfs_inode *ip = VTOI(vp);
	struct chfs_flash_data_node *dnode;
	struct chfs_node_ref *nref;

	memset(bp->b_data, 0, bp->b_bcount);

	ofs = bp->b_blkno * PAGE_SIZE;
	frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->fragtree, &ofs);

	if (!frag || frag->ofs > ofs || frag->ofs + frag->size <= ofs) {
		dbg("not found in frag tree\n");
		dbg("no node in frag\n");

	nref = frag->node->nref;

	size = sizeof(*dnode) + frag->size;

	buf = kmem_alloc(size, KM_SLEEP);

	dbg("reading from lnr: %u, offset: %u, size: %zu\n", nref->nref_lnr, CHFS_GET_OFS(nref->nref_offset), size);
	err = chfs_read_leb(chmp, nref->nref_lnr, buf, CHFS_GET_OFS(nref->nref_offset), size, &retlen);
		chfs_err("error after reading: %d\n", err);
	if (retlen != size) {
		chfs_err("retlen: %zu != size: %zu\n", retlen, size);

	dnode = (struct chfs_flash_data_node *)buf;

	// check the header CRC
	crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
	if (crc != le32toh(dnode->hdr_crc)) {
		chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->hdr_crc));

	// check the header magic bitmask
	if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
		chfs_err("Wrong magic bitmask.\n");

	// check the node CRC
	crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
	if (crc != le32toh(dnode->node_crc)) {
		chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));

	// check the data CRC
	crc = crc32(0, (uint8_t *)dnode->data, dnode->data_length);
	if (crc != le32toh(dnode->data_crc)) {
		chfs_err("Data CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->data_crc));

	memcpy(bp->b_data, dnode->data, dnode->data_length);

	kmem_free(buf, size);