/*	$NetBSD: chfs_readinode.c,v 1.9 2014/09/01 16:31:17 he Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/buf.h>

#include "chfs.h"

/* tmp node operations */
int chfs_check_td_data(struct chfs_mount *,
    struct chfs_tmp_dnode *);
int chfs_check_td_node(struct chfs_mount *,
    struct chfs_tmp_dnode *);
struct chfs_node_ref *chfs_first_valid_data_ref(struct chfs_node_ref *);
int chfs_add_tmp_dnode_to_tree(struct chfs_mount *,
    struct chfs_readinode_info *,
    struct chfs_tmp_dnode *);
void chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
void chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
static void chfs_kill_td(struct chfs_mount *,
    struct chfs_tmp_dnode *);
static void chfs_kill_tdi(struct chfs_mount *,
    struct chfs_tmp_dnode_info *);

/* frag node operations */
struct chfs_node_frag *new_fragment(struct chfs_full_dnode *,
    uint32_t,
    uint32_t);
int no_overlapping_node(struct rb_tree *, struct chfs_node_frag *,
    struct chfs_node_frag *, uint32_t);
int chfs_add_frag_to_fragtree(struct chfs_mount *,
    struct rb_tree *,
    struct chfs_node_frag *);
void chfs_obsolete_node_frag(struct chfs_mount *,
    struct chfs_node_frag *);

/* general node operations */
int chfs_get_data_nodes(struct chfs_mount *,
    struct chfs_inode *,
    struct chfs_readinode_info *);
int chfs_build_fragtree(struct chfs_mount *,
    struct chfs_inode *,
    struct chfs_readinode_info *);

/* tmp node rbtree operations */
static signed int
tmp_node_compare_nodes(void *ctx, const void *n1, const void *n2)
{
    const struct chfs_tmp_dnode_info *tdi1 = n1;
    const struct chfs_tmp_dnode_info *tdi2 = n2;

    return (tdi1->tmpnode->node->ofs - tdi2->tmpnode->node->ofs);
}

static signed int
tmp_node_compare_key(void *ctx, const void *n, const void *key)
{
    const struct chfs_tmp_dnode_info *tdi = n;
    uint64_t ofs = *(const uint64_t *)key;

    return (tdi->tmpnode->node->ofs - ofs);
}

const rb_tree_ops_t tmp_node_rbtree_ops = {
    .rbto_compare_nodes = tmp_node_compare_nodes,
    .rbto_compare_key = tmp_node_compare_key,
    .rbto_node_offset = offsetof(struct chfs_tmp_dnode_info, rb_node),
    .rbto_context = NULL
};

/* frag node rbtree operations */
static signed int
frag_compare_nodes(void *ctx, const void *n1, const void *n2)
{
    const struct chfs_node_frag *frag1 = n1;
    const struct chfs_node_frag *frag2 = n2;

    return (frag1->ofs - frag2->ofs);
}

static signed int
frag_compare_key(void *ctx, const void *n, const void *key)
{
    const struct chfs_node_frag *frag = n;
    uint64_t ofs = *(const uint64_t *)key;

    return (frag->ofs - ofs);
}

const rb_tree_ops_t frag_rbtree_ops = {
    .rbto_compare_nodes = frag_compare_nodes,
    .rbto_compare_key = frag_compare_key,
    .rbto_node_offset = offsetof(struct chfs_node_frag, rb_node),
    .rbto_context = NULL
};

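/*
 * Illustrative sketch (not part of the original source): both rb_tree_ops
 * above order their elements by the file offset they cover, so lookups are
 * keyed by offset.  Assuming an already built ip->fragtree, the usual query
 * pattern looks like this:
 *
 *	uint64_t ofs = 4096;	(hypothetical file offset)
 *	struct chfs_node_frag *frag = (struct chfs_node_frag *)
 *	    rb_tree_find_node_leq(&ip->fragtree, &ofs);
 *	if (frag && ofs < frag->ofs + frag->size)
 *		-> 'frag' describes the data that backs byte 4096
 */
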
/*
 * chfs_check_td_data - checks the data CRC of the node
 *
 * Returns: 0 - if everything OK;
 *	    1 - if CRC is incorrect;
 *	    2 - else;
 *	    error code if an error occurred.
 */
int
chfs_check_td_data(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
{
    int err;
    size_t retlen, len, totlen;
    uint32_t crc;
    uint64_t ofs;
    char *buf;
    struct chfs_node_ref *nref = td->node->nref;

    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
    KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

    ofs = CHFS_GET_OFS(nref->nref_offset) + sizeof(struct chfs_flash_data_node);
    len = td->node->size;
    if (!len)
        return 0;

    /* Read data. */
    buf = kmem_alloc(len, KM_SLEEP);
    if (!buf) {
        dbg("allocating error\n");
        return 2;
    }
    err = chfs_read_leb(chmp, nref->nref_lnr, buf, ofs, len, &retlen);
    if (err) {
        dbg("error while reading: %d\n", err);
        err = 2;
        goto out;
    }

    /* Check crc. */
    if (len != retlen) {
        dbg("len:%zu, retlen:%zu\n", len, retlen);
        err = 2;
        goto out;
    }
    crc = crc32(0, (uint8_t *)buf, len);

    if (crc != td->data_crc) {
        dbg("crc failed, calculated: 0x%x, orig: 0x%x\n", crc, td->data_crc);
        kmem_free(buf, len);
        return 1;
    }

    /* Correct sizes. */
    CHFS_MARK_REF_NORMAL(nref);
    totlen = CHFS_PAD(sizeof(struct chfs_flash_data_node) + len);

    mutex_enter(&chmp->chm_lock_sizes);
    chfs_change_size_unchecked(chmp, &chmp->chm_blocks[nref->nref_lnr], -totlen);
    chfs_change_size_used(chmp, &chmp->chm_blocks[nref->nref_lnr], totlen);
    mutex_exit(&chmp->chm_lock_sizes);
    KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);

    err = 0;
out:
    kmem_free(buf, len);
    return err;
}

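/*
 * Illustrative note (not from the original author): the data CRC covers
 * only the payload, which is stored immediately after the fixed-size
 * chfs_flash_data_node header within the erase block.  For a hypothetical
 * node written at LEB offset 1024 with 200 bytes of data:
 *
 *	ofs    = CHFS_GET_OFS(nref->nref_offset)		(1024)
 *	         + sizeof(struct chfs_flash_data_node);	(skip the header)
 *	len    = td->node->size;				(200)
 *	totlen = CHFS_PAD(sizeof(struct chfs_flash_data_node) + 200);
 *
 * On success the padded total length is moved from the erase block's
 * "unchecked" counter to its "used" counter.
 */
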
/* chfs_check_td_node - checks a temporary node */
int
chfs_check_td_node(struct chfs_mount *chmp, struct chfs_tmp_dnode *td)
{
    int ret;

    if (CHFS_REF_FLAGS(td->node->nref) != CHFS_UNCHECKED_NODE_MASK)
        return 0;

    ret = chfs_check_td_data(chmp, td);
    return ret;
}

/*
 * chfs_first_valid_data_ref -
 * returns the first valid nref after the given nref
 */
struct chfs_node_ref *
chfs_first_valid_data_ref(struct chfs_node_ref *nref)
{
    while (nref) {
        if (!CHFS_REF_OBSOLETE(nref)) {
#ifdef DGB_MSG_GC
            if (nref->nref_lnr == REF_EMPTY_NODE) {
                dbg("FIRST VALID IS EMPTY!\n");
            }
#endif
            return nref;
        }

        if (nref->nref_next) {
            nref = nref->nref_next;
        } else
            break;
    }
    return NULL;
}

/*
 * chfs_add_tmp_dnode_to_tdi -
 * adds a temporary node to a temporary node descriptor
 */
void
chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
{
    if (!tdi->tmpnode) {
        /* The chain is empty. */
        tdi->tmpnode = td;
    } else {
        /* Insert into the chain. */
        struct chfs_tmp_dnode *tmp = tdi->tmpnode;
        while (tmp->next) {
            tmp = tmp->next;
        }
        tmp->next = td;
    }
}

/*
 * chfs_remove_tmp_dnode_from_tdi -
 * removes a temporary node from its descriptor
 */
void
chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
{
    if (tdi->tmpnode == td) {
        /* It's the first in the chain. */
        tdi->tmpnode = tdi->tmpnode->next;
    } else {
        /* Remove from the middle of the chain.  Walk from the chain
         * head so the predecessor is found even when td is the second
         * element. */
        struct chfs_tmp_dnode *tmp = tdi->tmpnode;
        while (tmp->next && tmp->next != td) {
            tmp = tmp->next;
        }
        if (tmp->next) {
            tmp->next = td->next;
        }
    }
}

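/*
 * Illustrative note (not from the original author): a chfs_tmp_dnode_info
 * is one entry of the temporary rb-tree and carries a singly linked chain
 * of chfs_tmp_dnode structures that share the same starting offset,
 * typically different versions of the data written there:
 *
 *	tdi->tmpnode -> td (version 5) -> td (version 3) -> NULL
 *
 * chfs_add_tmp_dnode_to_tdi() appends at the tail of this chain and
 * chfs_remove_tmp_dnode_from_tdi() unlinks a single element from it.
 */
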
/* chfs_kill_td - removes all components of a temporary node */
static void
chfs_kill_td(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
{
    struct chfs_vnode_cache *vc;
    if (td->node) {
        mutex_enter(&chmp->chm_lock_vnocache);
        /* Remove the node from the vnode cache's data node chain. */
        vc = chfs_nref_to_vc(td->node->nref);
        chfs_remove_and_obsolete(chmp, vc, td->node->nref, &vc->dnode);
        mutex_exit(&chmp->chm_lock_vnocache);
    }

    chfs_free_tmp_dnode(td);
}

/* chfs_kill_tdi - removes a temporary node descriptor */
static void
chfs_kill_tdi(struct chfs_mount *chmp,
    struct chfs_tmp_dnode_info *tdi)
{
    struct chfs_tmp_dnode *next, *tmp = tdi->tmpnode;

    /* Iterate the chain and remove all temporary nodes from it. */
    while (tmp) {
        next = tmp->next;
        chfs_kill_td(chmp, tmp);
        tmp = next;
    }

    chfs_free_tmp_dnode_info(tdi);
}

/*
 * chfs_add_tmp_dnode_to_tree -
 * adds a temporary node to the temporary tree
 */
int
chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp,
    struct chfs_readinode_info *rii,
    struct chfs_tmp_dnode *newtd)
{
    uint64_t end_ofs = newtd->node->ofs + newtd->node->size;
    struct chfs_tmp_dnode_info *this;
    struct rb_node *node, *prev_node;
    struct chfs_tmp_dnode_info *newtdi;

    node = rb_tree_find_node(&rii->tdi_root, &newtd->node->ofs);
    if (node) {
        this = (struct chfs_tmp_dnode_info *)node;
        while (this->tmpnode->overlapped) {
            prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
            if (!prev_node) {
                this->tmpnode->overlapped = 0;
                break;
            }
            node = prev_node;
            this = (struct chfs_tmp_dnode_info *)node;
        }
    }
    while (node) {
        this = (struct chfs_tmp_dnode_info *)node;
        if (this->tmpnode->node->ofs > end_ofs)
            break;

        struct chfs_tmp_dnode *tmp_td = this->tmpnode;
        while (tmp_td) {
            if (tmp_td->version == newtd->version) {
                /* This is a new version of an old node. */
                if (!chfs_check_td_node(chmp, tmp_td)) {
                    dbg("calling kill td 0\n");
                    chfs_kill_td(chmp, newtd);
                    return 0;
                } else {
                    chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
                    chfs_kill_td(chmp, tmp_td);
                    chfs_add_tmp_dnode_to_tdi(this, newtd);
                    return 0;
                }
            }
            if (tmp_td->version < newtd->version &&
                tmp_td->node->ofs >= newtd->node->ofs &&
                tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
                /* New node entirely overlaps 'this' */
                if (chfs_check_td_node(chmp, newtd)) {
                    dbg("calling kill td 2\n");
                    chfs_kill_td(chmp, newtd);
                    return 0;
                }
                /* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
                while (tmp_td && tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
                    struct rb_node *next = rb_tree_iterate(&rii->tdi_root, this, RB_DIR_RIGHT);
                    struct chfs_tmp_dnode_info *next_tdi = (struct chfs_tmp_dnode_info *)next;
                    struct chfs_tmp_dnode *next_td = NULL;
                    if (tmp_td->next) {
                        next_td = tmp_td->next;
                    } else if (next_tdi) {
                        next_td = next_tdi->tmpnode;
                    }
                    if (tmp_td->version < newtd->version) {
                        chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
                        chfs_kill_td(chmp, tmp_td);
                        if (!this->tmpnode) {
                            rb_tree_remove_node(&rii->tdi_root, this);
                            chfs_kill_tdi(chmp, this);
                            this = next_tdi;
                        }
                    }
                    tmp_td = next_td;
                }
                continue;
            }
            if (tmp_td->version > newtd->version &&
                tmp_td->node->ofs <= newtd->node->ofs &&
                tmp_td->node->ofs + tmp_td->node->size >= end_ofs) {
                /* New node entirely overlapped by 'this' */
                if (!chfs_check_td_node(chmp, tmp_td)) {
                    dbg("this version: %llu\n",
                        (unsigned long long)tmp_td->version);
                    dbg("this ofs: %llu, size: %u\n",
                        (unsigned long long)tmp_td->node->ofs,
                        tmp_td->node->size);
                    dbg("calling kill td 4\n");
                    chfs_kill_td(chmp, newtd);
                    return 0;
                }
                /* ... but 'this' was bad. Replace it... */
                chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
                chfs_kill_td(chmp, tmp_td);
                if (!this->tmpnode) {
                    rb_tree_remove_node(&rii->tdi_root, this);
                    chfs_kill_tdi(chmp, this);
                }
                dbg("calling kill td 5\n");
                chfs_kill_td(chmp, newtd);
                break;
            }
            tmp_td = tmp_td->next;
        }
        node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
    }

    newtdi = chfs_alloc_tmp_dnode_info();
    chfs_add_tmp_dnode_to_tdi(newtdi, newtd);
    /* We neither completely obsoleted nor were completely
       obsoleted by an earlier node. Insert into the tree */
    struct chfs_tmp_dnode_info *tmp_tdi = rb_tree_insert_node(&rii->tdi_root, newtdi);
    if (tmp_tdi != newtdi) {
        chfs_remove_tmp_dnode_from_tdi(newtdi, newtd);
        chfs_add_tmp_dnode_to_tdi(tmp_tdi, newtd);
        chfs_kill_tdi(chmp, newtdi);
    }

    /* If there's anything behind that overlaps us, note it */
    node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
    if (node) {
        while (1) {
            this = (struct chfs_tmp_dnode_info *)node;
            if (this->tmpnode->node->ofs + this->tmpnode->node->size > newtd->node->ofs) {
                newtd->overlapped = 1;
            }
            if (!this->tmpnode->overlapped)
                break;

            prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
            if (!prev_node) {
                this->tmpnode->overlapped = 0;
                break;
            }
            node = prev_node;
        }
    }

    /* If the new node overlaps anything ahead, note it */
    node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
    this = (struct chfs_tmp_dnode_info *)node;
    while (this && this->tmpnode->node->ofs < end_ofs) {
        this->tmpnode->overlapped = 1;
        node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
        this = (struct chfs_tmp_dnode_info *)node;
    }
    return 0;
}

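/*
 * Worked example (made-up offsets and versions, not from the original
 * author): assume the temporary tree already holds A = [0..4096) with
 * version 2 and B = [4096..8192) with version 3, and a new node
 * newtd = [0..8192) with version 4 arrives.
 *
 *	- A and B are entirely covered by newtd and have lower versions,
 *	  so once newtd's CRC has been verified they are unlinked from
 *	  their descriptors and obsoleted;
 *	- newtd is inserted as a fresh descriptor keyed at offset 0;
 *	- the neighbours on both sides are then scanned and the
 *	  'overlapped' flags are updated, which is what later tells
 *	  chfs_build_fragtree() that these regions need version-by-version
 *	  comparison.
 */
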
/* new_fragment - creates a new fragment for a data node */
struct chfs_node_frag *
new_fragment(struct chfs_full_dnode *fdn, uint32_t ofs, uint32_t size)
{
    struct chfs_node_frag *newfrag;
    newfrag = chfs_alloc_node_frag();
    if (newfrag) {
        /* Initialize fragment. */
        newfrag->ofs = ofs;
        newfrag->size = size;
        newfrag->node = fdn;
        if (newfrag->node) {
            newfrag->node->frags++;
        }
    } else {
        chfs_err("cannot allocate a chfs_node_frag object\n");
    }

    return newfrag;
}

/*
 * no_overlapping_node - inserts a fragment into the fragtree
 * If there is a gap before the new fragment, a hole frag is inserted
 * to cover it.
 */
int
no_overlapping_node(struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag,
    struct chfs_node_frag *this, uint32_t lastend)
{
    if (lastend < newfrag->node->ofs) {
        struct chfs_node_frag *holefrag;

        holefrag = new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
        if (!holefrag) {
            chfs_free_node_frag(newfrag);
            return ENOMEM;
        }

        rb_tree_insert_node(fragtree, holefrag);
    }

    rb_tree_insert_node(fragtree, newfrag);

    return 0;
}

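/*
 * Illustrative note (not from the original author): a hole fragment has
 * node == NULL and merely marks a gap in the file.  If the previous
 * fragment ends at 4096 (lastend) and the new node starts at 8192, the
 * calls above produce:
 *
 *	holefrag: ofs = 4096, size = 4096, node = NULL
 *	newfrag:  ofs = 8192, size = data length, node = its full dnode
 *
 * so later reads of the [4096..8192) range return zeroes.
 */
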
/*
 * chfs_add_frag_to_fragtree -
 * adds a fragment to a data node's fragtree
 */
int
chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag)
{
    struct chfs_node_frag *this;
    uint32_t lastend;
    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

    /* Find the offset of frag which is before the new one. */
    this = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &newfrag->ofs);

    if (this) {
        lastend = this->ofs + this->size;
    } else {
        lastend = 0;
    }

    /* New fragment is end of the file and there is no overlapping. */
    if (lastend <= newfrag->ofs) {
        if (lastend && (lastend - 1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
            if (this->node)
                CHFS_MARK_REF_NORMAL(this->node->nref);
            CHFS_MARK_REF_NORMAL(newfrag->node->nref);
        }
        return no_overlapping_node(fragtree, newfrag, this, lastend);
    }

    if (newfrag->ofs > this->ofs) {
        CHFS_MARK_REF_NORMAL(newfrag->node->nref);
        if (this->node)
            CHFS_MARK_REF_NORMAL(this->node->nref);

        if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
            /* Newfrag is inside of this. */
            struct chfs_node_frag *newfrag2;

            newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
                this->ofs + this->size - newfrag->ofs - newfrag->size);
            if (!newfrag2)
                return ENOMEM;

            this->size = newfrag->ofs - this->ofs;

            rb_tree_insert_node(fragtree, newfrag);
            rb_tree_insert_node(fragtree, newfrag2);

            return 0;
        }

        /* Newfrag is bottom of this. */
        this->size = newfrag->ofs - this->ofs;
        rb_tree_insert_node(fragtree, newfrag);
    } else {
        /* Newfrag starts at the same point. */
        //TODO replace instead of remove and insert
        rb_tree_remove_node(fragtree, this);
        rb_tree_insert_node(fragtree, newfrag);

        if (newfrag->ofs + newfrag->size >= this->ofs + this->size) {
            chfs_obsolete_node_frag(chmp, this);
        } else {
            this->ofs += newfrag->size;
            this->size -= newfrag->size;

            rb_tree_insert_node(fragtree, this);
            return 0;
        }
    }
    /* OK, now we have newfrag added in the correct place in the tree, but
       frag_next(newfrag) may be a fragment which is overlapped by it
    */
    while ((this = frag_next(fragtree, newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
        rb_tree_remove_node(fragtree, this);
        chfs_obsolete_node_frag(chmp, this);
    }

    if (!this || newfrag->ofs + newfrag->size == this->ofs)
        return 0;

    this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
    this->ofs = newfrag->ofs + newfrag->size;

    if (this->node)
        CHFS_MARK_REF_NORMAL(this->node->nref);
    CHFS_MARK_REF_NORMAL(newfrag->node->nref);

    return 0;
}

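/*
 * Worked example (made-up offsets, not from the original author): the most
 * involved case above is a new fragment landing in the middle of an
 * existing one.  With 'this' = [0..12288) and newfrag = [4096..8192):
 *
 *	before:  [0 ......................... 12288)	this
 *	after:   [0..4096)  [4096..8192)  [8192..12288)
 *	           this        newfrag       newfrag2
 *
 * 'this' is shrunk in place, newfrag is inserted, and newfrag2 is a new
 * fragment that still points at this->node, so the old data node's frags
 * counter keeps track of the pieces that remain visible.
 */
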
/*
 * chfs_remove_frags_of_node -
 * removes all fragments from a fragtree and DOESN'T OBSOLETE them
 */
void
chfs_remove_frags_of_node(struct chfs_mount *chmp, struct rb_tree *fragtree,
    struct chfs_node_ref *nref)
{
    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
    struct chfs_node_frag *this, *next;

    if (nref == NULL) {
        return;
    }

    /* Iterate the tree and clean all elements. */
    this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
    while (this) {
        next = frag_next(fragtree, this);
        if (this->node->nref == nref) {
            rb_tree_remove_node(fragtree, this);
            chfs_free_node_frag(this);
        }
        this = next;
    }
}

/*
 * chfs_kill_fragtree -
 * removes all fragments from a fragtree and OBSOLETES them
 */
void
chfs_kill_fragtree(struct chfs_mount *chmp, struct rb_tree *fragtree)
{
    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
    struct chfs_node_frag *this, *next;

    /* Iterate the tree and clean all elements. */
    this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
    while (this) {
        next = frag_next(fragtree, this);
        rb_tree_remove_node(fragtree, this);
        chfs_obsolete_node_frag(chmp, this);
        this = next;
    }
}

/* chfs_truncate_fragtree - truncates the tree to a specified size */
uint32_t
chfs_truncate_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree, uint32_t size)
{
    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
    struct chfs_node_frag *frag;

    dbg("truncate to size: %u\n", size);

    frag = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &size);

    /* Find the last frag before size and set its new size. */
    if (frag && frag->ofs != size) {
        if (frag->ofs + frag->size > size) {
            frag->size = size - frag->ofs;
        }
        frag = frag_next(fragtree, frag);
    }

    /* Delete frags after new size. */
    while (frag && frag->ofs >= size) {
        struct chfs_node_frag *next = frag_next(fragtree, frag);

        rb_tree_remove_node(fragtree, frag);
        chfs_obsolete_node_frag(chmp, frag);
        frag = next;
    }

    if (size == 0) {
        return 0;
    }

    frag = frag_last(fragtree);

    if (!frag) {
        return 0;
    }

    if (frag->ofs + frag->size < size) {
        return frag->ofs + frag->size;
    }

    /* FIXME Should we check the position of the last node? (PAGE_CACHE size, etc.) */
    if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
        frag->node->nref->nref_offset =
            CHFS_GET_OFS(frag->node->nref->nref_offset) | CHFS_PRISTINE_NODE_MASK;
    }

    return size;
}

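/*
 * Usage sketch (made-up sizes, not from the original author): assuming the
 * fragtree currently describes 12288 bytes and the vnode is being set to
 * 5000 bytes:
 *
 *	uint32_t ret = chfs_truncate_fragtree(chmp, &ip->fragtree, 5000);
 *
 * The fragment covering byte 5000 is cut back to end exactly there, every
 * fragment starting at or beyond 5000 is removed and obsoleted, and
 * ret == 5000.  If the tree ends before the requested size (a file with a
 * trailing hole), the end offset of the last remaining fragment is
 * returned instead.
 */
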
/* chfs_obsolete_node_frag - obsoletes a fragment of a node */
void
chfs_obsolete_node_frag(struct chfs_mount *chmp,
    struct chfs_node_frag *this)
{
    struct chfs_vnode_cache *vc;
    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
    if (this->node) {
        /* The fragment is in a node. */
        KASSERT(this->node->frags != 0);
        this->node->frags--;
        if (this->node->frags == 0) {
            /* This is the last fragment. (There is no more.) */
            KASSERT(!CHFS_REF_OBSOLETE(this->node->nref));
            mutex_enter(&chmp->chm_lock_vnocache);
            vc = chfs_nref_to_vc(this->node->nref);
            dbg("[MARK] lnr: %u ofs: %u\n", this->node->nref->nref_lnr,
                this->node->nref->nref_offset);

            chfs_remove_and_obsolete(chmp, vc, this->node->nref, &vc->dnode);
            mutex_exit(&chmp->chm_lock_vnocache);

            chfs_free_full_dnode(this->node);
        } else {
            /* There are more frags in the node. */
            CHFS_MARK_REF_NORMAL(this->node->nref);
        }
    }
    chfs_free_node_frag(this);
}

/* chfs_add_full_dnode_to_inode - adds a data node to an inode */
int
chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_full_dnode *fd)
{
    int ret;
    struct chfs_node_frag *newfrag;
    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

    if (unlikely(!fd->size))
        return 0;

    /* Create a new fragment from the data node and add it to the fragtree. */
    newfrag = new_fragment(fd, fd->ofs, fd->size);
    if (unlikely(!newfrag))
        return ENOMEM;

    ret = chfs_add_frag_to_fragtree(chmp, &ip->fragtree, newfrag);
    if (ret)
        return ret;

    /* Check previous fragment. */
    if (newfrag->ofs & (PAGE_SIZE - 1)) {
        struct chfs_node_frag *prev = frag_prev(&ip->fragtree, newfrag);

        CHFS_MARK_REF_NORMAL(fd->nref);
        if (prev->node)
            CHFS_MARK_REF_NORMAL(prev->node->nref);
    }

    /* Check next fragment. */
    if ((newfrag->ofs + newfrag->size) & (PAGE_SIZE - 1)) {
        struct chfs_node_frag *next = frag_next(&ip->fragtree, newfrag);

        if (next) {
            CHFS_MARK_REF_NORMAL(fd->nref);
            if (next->node)
                CHFS_MARK_REF_NORMAL(next->node->nref);
        }
    }

    return 0;
}

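/*
 * Illustrative note (not from the original author): the two page-boundary
 * checks above decide how a data node may be handled later.  Assuming a
 * 4096-byte PAGE_SIZE, a write covering [1024..3072) starts and ends
 * inside a page, so the new node and both of its neighbours are marked
 * with CHFS_MARK_REF_NORMAL(); a write covering exactly [0..4096) triggers
 * neither branch.  Presumably this keeps page-aligned, whole nodes
 * eligible for cheaper (pristine) handling, while nodes that share a page
 * with their neighbours are always treated as normal nodes.
 */
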
/* chfs_get_data_nodes - get temporary nodes of an inode */
int
chfs_get_data_nodes(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
{
    uint32_t crc;
    int err;
    size_t len, retlen;
    struct chfs_node_ref *nref;
    struct chfs_flash_data_node *dnode;
    struct chfs_tmp_dnode *td;
    char *buf;

    len = sizeof(struct chfs_flash_data_node);
    buf = kmem_alloc(len, KM_SLEEP);

    dnode = kmem_alloc(len, KM_SLEEP);
    if (!dnode) {
        kmem_free(buf, len);
        return ENOMEM;
    }

    nref = chfs_first_valid_data_ref(ip->chvc->dnode);

    /* Update highest version. */
    rii->highest_version = ip->chvc->highest_version;

    while (nref && (struct chfs_vnode_cache *)nref != ip->chvc) {
        err = chfs_read_leb(chmp, nref->nref_lnr, buf, CHFS_GET_OFS(nref->nref_offset), len, &retlen);
        if (err || len != retlen)
            goto out;
        dnode = (struct chfs_flash_data_node *)buf;

        /* Check header crc. */
        crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
        if (crc != le32toh(dnode->hdr_crc)) {
            chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->hdr_crc));
            goto cont;
        }

        /* Check header magic bitmask. */
        if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
            chfs_err("Wrong magic bitmask.\n");
            goto cont;
        }

        /* Check node crc. */
        crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
        if (crc != le32toh(dnode->node_crc)) {
            chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));
            goto cont;
        }

        td = chfs_alloc_tmp_dnode();
        if (!td) {
            chfs_err("Can't allocate tmp dnode info.\n");
            err = ENOMEM;
            goto out;
        }

        /* We don't check data crc here, just add nodes to tmp frag tree, because
         * we don't want to check nodes which have been overlapped by a new node
         * with a higher version number.
         */
        td->node = chfs_alloc_full_dnode();
        if (!td->node) {
            chfs_err("Can't allocate full dnode info.\n");
            err = ENOMEM;
            goto out_tmp_dnode;
        }
        td->version = le64toh(dnode->version);
        td->node->ofs = le64toh(dnode->offset);
        td->data_crc = le32toh(dnode->data_crc);
        td->node->nref = nref;
        td->node->size = le32toh(dnode->data_length);
        td->node->frags = 1;
        td->overlapped = 0;

        if (td->version > rii->highest_version) {
            rii->highest_version = td->version;
        }

        /* Add node to the tree. */
        err = chfs_add_tmp_dnode_to_tree(chmp, rii, td);
        if (err)
            goto out_full_dnode;

cont:
        nref = chfs_first_valid_data_ref(nref->nref_next);
    }

    ip->chvc->highest_version = rii->highest_version;
    return 0;

out_full_dnode:
    chfs_free_full_dnode(td->node);
out_tmp_dnode:
    chfs_free_tmp_dnode(td);
out:
    kmem_free(buf, len);
    kmem_free(dnode, len);
    return err;
}

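/*
 * Illustrative note (not from the original author): for every valid data
 * nref of the vnode cache, the loop above reads only the fixed-size
 * on-flash header and validates it in three steps before the node is
 * accepted into the temporary tree:
 *
 *	1. hdr_crc  - CRC over the common node header,
 *	2. magic    - must equal CHFS_FS_MAGIC_BITMASK,
 *	3. node_crc - CRC over the whole chfs_flash_data_node header.
 *
 * The data CRC is deliberately postponed (see the comment in the loop
 * body): chfs_check_td_data() computes it only for nodes that survive the
 * overlap resolution, so payloads that were already superseded are never
 * read back from flash.
 */
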
/* chfs_build_fragtree - builds fragtree from temporary tree */
int
chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
{
    struct chfs_tmp_dnode_info *pen, *last, *this;
    struct rb_tree ver_tree;    /* version tree, used only temporarily */
    uint64_t high_ver = 0;
    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

    rb_tree_init(&ver_tree, &tmp_node_rbtree_ops);

    /* Update highest version and latest node reference. */
    if (rii->mdata_tn) {
        high_ver = rii->mdata_tn->tmpnode->version;
        rii->latest_ref = rii->mdata_tn->tmpnode->node->nref;
    }

    /* Iterate the temporary tree in reverse order. */
    pen = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&rii->tdi_root);

    while ((last = pen)) {
        pen = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&rii->tdi_root, last, RB_DIR_LEFT);

        /* We build here a version tree from overlapped nodes. */
        rb_tree_remove_node(&rii->tdi_root, last);
        rb_tree_insert_node(&ver_tree, last);

        if (last->tmpnode->overlapped) {
            if (pen)
                continue;

            last->tmpnode->overlapped = 0;
        }

        this = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&ver_tree);

        /* Start to build the fragtree. */
        while (this) {
            struct chfs_tmp_dnode_info *vers_next;
            int ret;

            vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
            rb_tree_remove_node(&ver_tree, this);

            struct chfs_tmp_dnode *tmp_td = this->tmpnode;
            while (tmp_td) {
                struct chfs_tmp_dnode *next_td = tmp_td->next;

                /* Check temporary node. */
                if (chfs_check_td_node(chmp, tmp_td)) {
                    if (next_td) {
                        chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
                        chfs_kill_td(chmp, tmp_td);
                    } else {
                        break;
                    }
                } else {
                    if (tmp_td->version > high_ver) {
                        high_ver = tmp_td->version;
                        dbg("highver: %llu\n", (unsigned long long)high_ver);
                        rii->latest_ref = tmp_td->node->nref;
                    }

                    /* Add node to inode and its fragtree. */
                    ret = chfs_add_full_dnode_to_inode(chmp, ip, tmp_td->node);
                    if (ret) {
                        /* On error, clean the whole version tree. */
                        while (1) {
                            vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
                            while (tmp_td) {
                                next_td = tmp_td->next;

                                chfs_free_full_dnode(tmp_td->node);
                                chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
                                chfs_kill_td(chmp, tmp_td);
                                tmp_td = next_td;
                            }
                            chfs_free_tmp_dnode_info(this);
                            this = vers_next;
                            if (!this)
                                break;
                            rb_tree_remove_node(&ver_tree, vers_next);
                            chfs_kill_tdi(chmp, vers_next);
                        }
                        return ret;
                    }

                    /* Remove temporary node from temporary descriptor.
                     * Shouldn't obsolete tmp_td here, because tmp_td->node
                     * was added to the inode. */
                    chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
                    chfs_free_tmp_dnode(tmp_td);
                }
                tmp_td = next_td;
            }
            /* Continue with the previous element of version tree. */
            chfs_kill_tdi(chmp, this);
            this = vers_next;
        }
    }
    return 0;
}

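/*
 * Illustrative note (not from the original author): the function above
 * works in two phases.  Walking the temporary tree from the highest
 * offset downwards, each run of nodes marked 'overlapped' is moved into
 * ver_tree; once a non-overlapped node closes the run, ver_tree holds all
 * candidates for that region of the file.  The run is then drained, each
 * chain entry is CRC-checked with chfs_check_td_node(), bad nodes are
 * obsoleted, and the good ones are handed to
 * chfs_add_full_dnode_to_inode(), which resolves the remaining overlaps
 * fragment by fragment.  rii->latest_ref ends up pointing at the data
 * node with the highest version seen, which chfs_read_inode_internal()
 * uses to detect whether any valid data node was found at all.
 */
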
/* chfs_read_inode - checks the state of the inode then reads and builds it */
int chfs_read_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
{
    struct chfs_vnode_cache *vc = ip->chvc;

    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

retry:
    mutex_enter(&chmp->chm_lock_vnocache);
    switch (vc->state) {
    case VNO_STATE_UNCHECKED:
        /* FALLTHROUGH */
    case VNO_STATE_CHECKEDABSENT:
        vc->state = VNO_STATE_READING;
        break;
    case VNO_STATE_CHECKING:
        /* FALLTHROUGH */
    case VNO_STATE_GC:
        mutex_exit(&chmp->chm_lock_vnocache);
        goto retry;
        break;
    case VNO_STATE_PRESENT:
        /* FALLTHROUGH */
    case VNO_STATE_READING:
        chfs_err("Reading inode #%llu in state %d!\n",
            (unsigned long long)vc->vno, vc->state);
        chfs_err("wants to read a nonexistent ino %llu\n",
            (unsigned long long)vc->vno);
        return ENOENT;
    default:
        panic("BUG() Bad vno cache state.");
    }
    mutex_exit(&chmp->chm_lock_vnocache);

    return chfs_read_inode_internal(chmp, ip);
}

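/*
 * Illustrative note (not from the original author): the switch above is a
 * small state machine on the vnode cache.  Roughly:
 *
 *	UNCHECKED / CHECKEDABSENT  -> READING	(proceed with the read)
 *	CHECKING / GC              -> retry	(another pass owns the vnode)
 *	PRESENT / READING          -> ENOENT	(unexpected, complain)
 *
 * The retry path simply drops chm_lock_vnocache and loops; assuming the
 * check or garbage collection finishes, the state eventually becomes one
 * of the first two and the read continues in chfs_read_inode_internal().
 */
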
/*
 * chfs_read_inode_internal - reads and builds an inode
 * First collects the temporary data nodes, then builds the fragtree from them.
 */
int
chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
{
    int err;
    size_t len, retlen;
    char *buf;
    struct chfs_readinode_info rii;
    struct chfs_flash_vnode *fvnode;

    KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

    len = sizeof(*fvnode);

    memset(&rii, 0, sizeof(rii));

    rb_tree_init(&rii.tdi_root, &tmp_node_rbtree_ops);

    /* Build a temporary node tree. */
    err = chfs_get_data_nodes(chmp, ip, &rii);
    if (err) {
        if (ip->chvc->state == VNO_STATE_READING)
            ip->chvc->state = VNO_STATE_CHECKEDABSENT;
        /* FIXME Should we kill fragtree or something here? */
        return err;
    }

    /* Build fragtree from temp nodes. */
    rb_tree_init(&ip->fragtree, &frag_rbtree_ops);

    err = chfs_build_fragtree(chmp, ip, &rii);
    if (err) {
        if (ip->chvc->state == VNO_STATE_READING)
            ip->chvc->state = VNO_STATE_CHECKEDABSENT;
        /* FIXME Should we kill fragtree or something here? */
        return err;
    }

    if (!rii.latest_ref) {
        return 0;
    }

    buf = kmem_alloc(len, KM_SLEEP);
    if (!buf)
        return ENOMEM;

    /* Set inode size from its vnode information node. */
    err = chfs_read_leb(chmp, ip->chvc->v->nref_lnr, buf, CHFS_GET_OFS(ip->chvc->v->nref_offset), len, &retlen);
    if (err || retlen != len) {
        kmem_free(buf, len);
        return err ? err : EIO;
    }

    fvnode = (struct chfs_flash_vnode *)buf;

    dbg("set size from v: %u\n", fvnode->dn_size);
    chfs_set_vnode_size(ITOV(ip), fvnode->dn_size);
    uint32_t retsize = chfs_truncate_fragtree(chmp, &ip->fragtree, fvnode->dn_size);
    if (retsize != fvnode->dn_size) {
        dbg("Truncating failed. It is %u instead of %u\n", retsize, fvnode->dn_size);
    }

    kmem_free(buf, len);

    if (ip->chvc->state == VNO_STATE_READING) {
        ip->chvc->state = VNO_STATE_PRESENT;
    }

    return 0;
}

/* chfs_read_data - reads and checks data of a file */
int
chfs_read_data(struct chfs_mount *chmp, struct vnode *vp,
    struct buf *bp)
{
    off_t ofs;
    struct chfs_node_frag *frag;
    char *buf;
    int err = 0;
    size_t size, retlen;
    uint32_t crc;
    struct chfs_inode *ip = VTOI(vp);
    struct chfs_flash_data_node *dnode;
    struct chfs_node_ref *nref;

    memset(bp->b_data, 0, bp->b_bcount);

    /* Look up the fragment that covers the requested offset. */
    ofs = bp->b_blkno * PAGE_SIZE;
    frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->fragtree, &ofs);

    if (!frag || frag->ofs > ofs || frag->ofs + frag->size <= ofs) {
        bp->b_resid = 0;
        dbg("not found in frag tree\n");
        return 0;
    }

    if (!frag->node) {
        dbg("no node in frag\n");
        return 0;
    }

    nref = frag->node->nref;
    size = sizeof(*dnode) + frag->size;

    buf = kmem_alloc(size, KM_SLEEP);

    /* Read node from flash. */
    dbg("reading from lnr: %u, offset: %u, size: %zu\n", nref->nref_lnr, CHFS_GET_OFS(nref->nref_offset), size);
    err = chfs_read_leb(chmp, nref->nref_lnr, buf, CHFS_GET_OFS(nref->nref_offset), size, &retlen);
    if (err) {
        chfs_err("error after reading: %d\n", err);
        goto out;
    }
    if (retlen != size) {
        chfs_err("retlen: %zu != size: %zu\n", retlen, size);
        err = EIO;
        goto out;
    }

    /* Verify the node we just read from flash. */
    dnode = (struct chfs_flash_data_node *)buf;
    crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
    if (crc != le32toh(dnode->hdr_crc)) {
        chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->hdr_crc));
        err = EIO;
        goto out;
    }

    /* Check header magic bitmask. */
    if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
        chfs_err("Wrong magic bitmask.\n");
        err = EIO;
        goto out;
    }

    /* Check crc of node. */
    crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
    if (crc != le32toh(dnode->node_crc)) {
        chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));
        err = EIO;
        goto out;
    }

    /* Check crc of data. */
    crc = crc32(0, (uint8_t *)dnode->data, dnode->data_length);
    if (crc != le32toh(dnode->data_crc)) {
        chfs_err("Data CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->data_crc));
        err = EIO;
        goto out;
    }

    memcpy(bp->b_data, dnode->data, dnode->data_length);
    bp->b_resid = 0;

out:
    kmem_free(buf, size);
    return err;
}

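/*
 * Usage sketch (not from the original author; assumes a 4096-byte
 * PAGE_SIZE): the buffer's block number is interpreted in page units, so a
 * request for block 3 of a file asks for bytes starting at 3 * 4096.  The
 * lookup this routine performs is essentially:
 *
 *	off_t ofs = bp->b_blkno * PAGE_SIZE;	(e.g. 12288)
 *	frag = rb_tree_find_node_leq(&ip->fragtree, &ofs);
 *	- no frag, or a hole frag (frag->node == NULL): the zero-filled
 *	  b_data is returned as-is;
 *	- otherwise the whole data node is read back and its header, node
 *	  and data CRCs are verified before dnode->data is copied into
 *	  bp->b_data.
 */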