1 /* COPYRIGHT 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
4 #include "../../inode.h"
5 #include "../../page_cache.h"
8 #include <linux/quotaops.h>
9 #include <linux/swap.h>
11 static inline reiser4_extent
*ext_by_offset(const znode
*node
, int offset
)
15 ext
= (reiser4_extent
*) (zdata(node
) + offset
);
/*
 * check_uf_coord - verify coord extension (debug-only consistency check)
 *
 * NOTE(review): extraction-garbled fragment — some original lines (braces,
 * local declarations, #if REISER4_DEBUG guards) appear to be missing.
 * Code tokens are left untouched below; only comments were added.
 */
20 * check_uf_coord - verify coord extension
24 * Makes sure that all fields of @uf_coord are set properly. If @key is
25 * specified - check whether @uf_coord is set correspondingly.
27 static void check_uf_coord(const uf_coord_t
*uf_coord
, const reiser4_key
*key
)
31 const struct extent_coord_extension
*ext_coord
;
/* shorthand pointers into @uf_coord */
34 coord
= &uf_coord
->coord
;
35 ext_coord
= &uf_coord
->extension
.extent
;
36 ext
= ext_by_offset(coord
->node
, uf_coord
->extension
.extent
.ext_offset
);
/*
 * with node data mapped, verify that the cached extension (nr_units,
 * width, pos_in_unit, copied extent body) agrees with the on-node item
 */
39 WITH_DATA(coord
->node
,
40 (uf_coord
->valid
== 1 &&
41 coord_is_iplug_set(coord
) &&
42 item_is_extent(coord
) &&
43 ext_coord
->nr_units
== nr_units_extent(coord
) &&
44 ext
== extent_by_coord(coord
) &&
45 ext_coord
->width
== extent_get_width(ext
) &&
46 coord
->unit_pos
< ext_coord
->nr_units
&&
47 ext_coord
->pos_in_unit
< ext_coord
->width
&&
48 memcmp(ext
, &ext_coord
->extent
,
49 sizeof(reiser4_extent
)) == 0)));
/*
 * if @key was supplied: the unit key advanced by pos_in_unit pages must
 * equal @key (presumably guarded by "if (key)" in the lost original)
 */
51 reiser4_key coord_key
;
53 unit_key_by_coord(&uf_coord
->coord
, &coord_key
);
54 set_key_offset(&coord_key
,
55 get_key_offset(&coord_key
) +
56 (uf_coord
->extension
.extent
.
57 pos_in_unit
<< PAGE_CACHE_SHIFT
));
58 assert("", keyeq(key
, &coord_key
));
63 static inline reiser4_extent
*ext_by_ext_coord(const uf_coord_t
*uf_coord
)
65 check_uf_coord(uf_coord
, NULL
);
67 return ext_by_offset(uf_coord
->coord
.node
,
68 uf_coord
->extension
.extent
.ext_offset
);
/*
 * offset_is_in_unit - does byte offset @off fall inside the extent unit
 * @coord points at?
 * NOTE(review): garbled fragment — local declarations (ext, unit_key,
 * unit_off), the lower-bound check and the return statements are missing
 * from this extraction. Code tokens left untouched.
 */
79 /* return 1 if offset @off is inside of extent unit pointed to by @coord. Set
80 pos_in_unit inside of unit correspondingly */
81 static int offset_is_in_unit(const coord_t
*coord
, loff_t off
)
87 ext
= extent_by_coord(coord
);
/* unit_off = byte offset of the first byte addressed by this unit */
89 unit_key_extent(coord
, &unit_key
);
90 unit_off
= get_key_offset(&unit_key
);
/* off past the end of the unit (unit covers width blocks) */
93 if (off
>= (unit_off
+ (current_blocksize
* extent_get_width(ext
))))
/*
 * coord_matches_key_extent - check that @coord addresses the unit that
 * covers @key; asserts @key lies within the item's key range, then defers
 * to offset_is_in_unit() for the per-unit check.
 * NOTE(review): return-type line ("static int") missing from extraction;
 * code tokens left untouched.
 */
99 coord_matches_key_extent(const coord_t
* coord
, const reiser4_key
* key
)
101 reiser4_key item_key
;
103 assert("vs-771", coord_is_existing_unit(coord
));
104 assert("vs-1258", keylt(key
, append_key_extent(coord
, &item_key
)));
105 assert("vs-1259", keyge(key
, item_key_by_coord(coord
, &item_key
)));
107 return offset_is_in_unit(coord
, get_key_offset(key
));
117 * Returns 1 if @key is equal to an append key of item @coord is set to
119 static int can_append(const reiser4_key
*key
, const coord_t
*coord
)
121 reiser4_key append_key
;
123 return keyeq(key
, append_key_extent(coord
, &append_key
));
/*
 * append_hole - extend the last extent item of a file with a hole up to
 * the offset of @key. Either widens a trailing HOLE_EXTENT unit or
 * inserts a new hole unit after the last unit.
 * NOTE(review): garbled fragment — braces, comment delimiters and the
 * return after the widen branch appear missing. Code tokens untouched.
 */
133 static int append_hole(coord_t
*coord
, lock_handle
*lh
,
134 const reiser4_key
*key
)
136 reiser4_key append_key
;
137 reiser4_block_nr hole_width
;
138 reiser4_extent
*ext
, new_ext
;
139 reiser4_item_data idata
;
141 /* last item of file may have to be appended with hole */
142 assert("vs-708", znode_get_level(coord
->node
) == TWIG_LEVEL
);
143 assert("vs-714", item_id_by_coord(coord
) == EXTENT_POINTER_ID
);
145 /* key of first byte which is not addressed by this extent */
146 append_key_extent(coord
, &append_key
);
148 assert("", keyle(&append_key
, key
));
/* hole width in blocks, rounded up to blocksize */
151 * extent item has to be appended with hole. Calculate length of that
154 hole_width
= ((get_key_offset(key
) - get_key_offset(&append_key
) +
155 current_blocksize
- 1) >> current_blocksize_bits
);
156 assert("vs-954", hole_width
> 0);
158 /* set coord after last unit */
159 coord_init_after_item_end(coord
);
161 /* get last extent in the item */
162 ext
= extent_by_coord(coord
);
163 if (state_of_extent(ext
) == HOLE_EXTENT
) {
165 * last extent of a file is hole extent. Widen that extent by
166 * @hole_width blocks. Note that we do not worry about
167 * overflowing - extent width is 64 bits
169 reiser4_set_extent(ext
, HOLE_EXTENT_START
,
170 extent_get_width(ext
) + hole_width
);
171 znode_make_dirty(coord
->node
);
175 /* append last item of the file with hole extent unit */
176 assert("vs-713", (state_of_extent(ext
) == ALLOCATED_EXTENT
||
177 state_of_extent(ext
) == UNALLOCATED_EXTENT
));
178 reiser4_set_extent below builds the new hole unit; insert_into_item
/* NOTE(review): the line above is an added comment, original resumes: */
179 reiser4_set_extent(&new_ext
, HOLE_EXTENT_START
, hole_width
);
180 init_new_extent(&idata
, &new_ext
, 1);
181 return insert_into_item(coord
, lh
, &append_key
, &idata
, 0);
/*
 * check_jnodes - debug check that the write-locked twig node @twig covers
 * the key range [@key .. @key + @count pages - 1], i.e. all jnodes about
 * to be captured are addressed by items inside @twig.
 * NOTE(review): garbled fragment — declarations of coord c / jnode_key
 * initialization from @key appear partially missing. Tokens untouched.
 */
186 * @twig: longterm locked twig node
190 static void check_jnodes(znode
*twig
, const reiser4_key
*key
, int count
)
194 reiser4_key node_key
, jnode_key
;
198 assert("", twig
!= NULL
);
199 assert("", znode_get_level(twig
) == TWIG_LEVEL
);
200 assert("", znode_is_write_locked(twig
));
203 /* get the smallest key in twig node */
204 coord_init_first_unit(&c
, twig
);
205 unit_key_by_coord(&c
, &node_key
);
206 assert("", keyle(&node_key
, &jnode_key
));
/* append key of the last item bounds the node's key range from above */
208 coord_init_last_unit(&c
, twig
);
209 unit_key_by_coord(&c
, &node_key
);
210 if (item_plugin_by_coord(&c
)->s
.file
.append_key
)
211 item_plugin_by_coord(&c
)->s
.file
.append_key(&c
, &node_key
);
/* key of the last byte of the last jnode must be inside the node */
212 set_key_offset(&jnode_key
,
213 get_key_offset(&jnode_key
) + (loff_t
)count
* PAGE_CACHE_SIZE
- 1);
214 assert("", keylt(&jnode_key
, &node_key
));
/*
 * append_last_extent - append last extent item of a file with an
 * unallocated extent of width @count, or insert a hole first if writing
 * past the current end. Assigns fake block numbers to the jnodes and
 * captures/dirties them.
 * NOTE(review): garbled fragment — braces, error-return checks, the
 * HOLE_EXTENT case of the switch and the final return appear missing.
 * Code tokens untouched.
 */
220 * append_last_extent - append last file item
221 * @uf_coord: coord to start insertion from
222 * @jnodes: array of jnodes
223 * @count: number of jnodes in the array
225 * There is already at least one extent item of file @inode in the tree. Append
226 * the last of them with unallocated extent unit of width @count. Assign
227 * fake block numbers to jnodes corresponding to the inserted extent.
229 static int append_last_extent(uf_coord_t
*uf_coord
, const reiser4_key
*key
,
230 jnode
**jnodes
, int count
)
233 reiser4_extent new_ext
;
234 reiser4_item_data idata
;
236 struct extent_coord_extension
*ext_coord
;
238 reiser4_block_nr block
;
242 coord
= &uf_coord
->coord
;
243 ext_coord
= &uf_coord
->extension
.extent
;
244 ext
= ext_by_ext_coord(uf_coord
);
246 /* check correctness of position in the item */
247 assert("vs-228", coord
->unit_pos
== coord_last_unit_pos(coord
));
248 assert("vs-1311", coord
->between
== AFTER_UNIT
);
249 assert("vs-1302", ext_coord
->pos_in_unit
== ext_coord
->width
- 1);
251 if (!can_append(key
, coord
)) {
252 /* hole extent has to be inserted */
253 result
= append_hole(coord
, uf_coord
->lh
, key
);
/* key must match the page index of the first jnode */
261 assert("", get_key_offset(key
) == (loff_t
)index_jnode(jnodes
[0]) * PAGE_CACHE_SIZE
);
/* quota: charge @count blocks against the file's inode */
263 result
= DQUOT_ALLOC_BLOCK_NODIRTY(mapping_jnode(jnodes
[0])->host
,
267 switch (state_of_extent(ext
)) {
268 case UNALLOCATED_EXTENT
:
270 * last extent unit of the file is unallocated one. Increase
271 * its width by @count
273 reiser4_set_extent(ext
, UNALLOCATED_EXTENT_START
,
274 extent_get_width(ext
) + count
);
275 znode_make_dirty(coord
->node
);
277 /* update coord extension */
278 ext_coord
->width
+= count
;
279 ON_DEBUG(extent_set_width
280 (&uf_coord
->extension
.extent
.extent
,
285 case ALLOCATED_EXTENT
:
287 * last extent unit of the file is either hole or allocated
288 * one. Append one unallocated extent of width @count
290 reiser4_set_extent(&new_ext
, UNALLOCATED_EXTENT_START
, count
);
291 init_new_extent(&idata
, &new_ext
, 1);
292 result
= insert_into_item(coord
, uf_coord
->lh
, key
, &idata
, 0);
303 * make sure that we hold long term locked twig node containing all
304 * jnodes we are about to capture
306 check_jnodes(uf_coord
->lh
->node
, key
, count
);
309 * assign fake block numbers to all jnodes. FIXME: make sure whether
310 * twig node containing inserted extent item is locked
312 block
= fake_blocknr_unformatted(count
);
313 for (i
= 0; i
< count
; i
++, block
++) {
315 spin_lock_jnode(node
);
316 JF_SET(node
, JNODE_CREATED
);
317 jnode_set_block(node
, &block
);
318 result
= reiser4_try_capture(node
, ZNODE_WRITE_LOCK
, 0);
320 jnode_make_dirty_locked(node
);
321 spin_unlock_jnode(node
);
/*
 * insert_first_hole - insert a hole extent item covering [0, @key offset)
 * as the first item of a file (write starts past offset 0).
 * NOTE(review): garbled fragment — initialization of item_key from @key
 * before zeroing its offset appears missing. Code tokens untouched.
 */
327 * insert_first_hole - inser hole extent into tree
334 static int insert_first_hole(coord_t
*coord
, lock_handle
*lh
,
335 const reiser4_key
*key
)
337 reiser4_extent new_ext
;
338 reiser4_item_data idata
;
339 reiser4_key item_key
;
340 reiser4_block_nr hole_width
;
342 /* @coord must be set for inserting of new item */
343 assert("vs-711", coord_is_between_items(coord
));
/* item key = file key at offset 0 */
346 set_key_offset(&item_key
, 0ull);
/* hole width in blocks, rounded up */
348 hole_width
= ((get_key_offset(key
) + current_blocksize
- 1) >>
349 current_blocksize_bits
);
350 assert("vs-710", hole_width
> 0);
352 /* compose body of hole extent and insert item into tree */
353 reiser4_set_extent(&new_ext
, HOLE_EXTENT_START
, hole_width
);
354 init_new_extent(&idata
, &new_ext
, 1);
355 return insert_extent_by_coord(coord
, &idata
, &item_key
, lh
);
/*
 * insert_first_extent - insert the first extent item of a file: either a
 * leading hole (write not at offset 0) or an unallocated extent of width
 * @count; then assign fake block numbers to the jnodes, capture and dirty
 * them.
 * NOTE(review): garbled fragment — braces, early returns, the @inode
 * parameter line, loop body jnode assignment and uf_coord invalidation
 * appear missing. Code tokens untouched.
 */
360 * insert_first_extent - insert first file item
361 * @inode: inode of file
362 * @uf_coord: coord to start insertion from
363 * @jnodes: array of jnodes
364 * @count: number of jnodes in the array
367 * There are no items of file @inode in the tree yet. Insert unallocated extent
368 * of width @count into tree or hole extent if writing not to the
369 * beginning. Assign fake block numbers to jnodes corresponding to the inserted
370 * unallocated extent. Returns number of jnodes or error code.
372 static int insert_first_extent(uf_coord_t
*uf_coord
, const reiser4_key
*key
,
373 jnode
**jnodes
, int count
,
378 reiser4_extent new_ext
;
379 reiser4_item_data idata
;
380 reiser4_block_nr block
;
381 struct unix_file_info
*uf_info
;
384 /* first extent insertion starts at leaf level */
385 assert("vs-719", znode_get_level(uf_coord
->coord
.node
) == LEAF_LEVEL
);
386 assert("vs-711", coord_is_between_items(&uf_coord
->coord
));
/* writing past offset 0 into an empty file: hole comes first */
388 if (get_key_offset(key
) != 0) {
389 result
= insert_first_hole(&uf_coord
->coord
, uf_coord
->lh
, key
);
391 uf_info
= unix_file_inode_data(inode
);
394 * first item insertion is only possible when writing to empty
395 * file or performing tail conversion
397 assert("", (uf_info
->container
== UF_CONTAINER_EMPTY
||
398 (reiser4_inode_get_flag(inode
,
399 REISER4_PART_MIXED
) &&
400 reiser4_inode_get_flag(inode
,
401 REISER4_PART_IN_CONV
))));
402 /* if file was empty - update its state */
403 if (result
== 0 && uf_info
->container
== UF_CONTAINER_EMPTY
)
404 uf_info
->container
= UF_CONTAINER_EXTENTS
;
/* quota: charge @count blocks */
411 result
= DQUOT_ALLOC_BLOCK_NODIRTY(mapping_jnode(jnodes
[0])->host
, count
);
415 * prepare for tree modification: compose body of item and item data
416 * structure needed for insertion
418 reiser4_set_extent(&new_ext
, UNALLOCATED_EXTENT_START
, count
);
419 init_new_extent(&idata
, &new_ext
, 1);
421 /* insert extent item into the tree */
422 result
= insert_extent_by_coord(&uf_coord
->coord
, &idata
, key
,
428 * make sure that we hold long term locked twig node containing all
429 * jnodes we are about to capture
431 check_jnodes(uf_coord
->lh
->node
, key
, count
);
433 * assign fake block numbers to all jnodes, capture and mark them dirty
435 block
= fake_blocknr_unformatted(count
);
436 for (i
= 0; i
< count
; i
++, block
++) {
438 spin_lock_jnode(node
);
439 JF_SET(node
, JNODE_CREATED
);
440 jnode_set_block(node
, &block
);
441 result
= reiser4_try_capture(node
, ZNODE_WRITE_LOCK
, 0);
443 jnode_make_dirty_locked(node
);
444 spin_unlock_jnode(node
);
448 * invalidate coordinate, research must be performed to continue
449 * because write will continue on twig level
/*
 * plug_hole - replace one block of a hole extent with an unallocated
 * extent of width 1. Four cases: hole of width 1 (overwrite in place),
 * plugging at the first block (may merge with an unallocated left
 * neighbor or split hole into [unalloc, hole]), plugging at the last
 * block (mirror of the previous case), or plugging in the middle (split
 * into [hole, unalloc, hole] — the "two additional extents" worst case).
 * NOTE(review): garbled fragment — outer braces, the width==1 condition,
 * several returns and rh field setup lines appear missing. Tokens
 * untouched below.
 */
456 * plug_hole - replace hole extent with unallocated and holes
460 * @h: structure containing coordinate, lock handle, key, etc
462 * Creates an unallocated extent of width 1 within a hole. In worst case two
463 * additional extents can be created.
465 static int plug_hole(uf_coord_t
*uf_coord
, const reiser4_key
*key
, int *how
)
467 struct replace_handle rh
;
469 reiser4_block_nr width
, pos_in_unit
;
471 struct extent_coord_extension
*ext_coord
;
472 int return_inserted_position
;
474 check_uf_coord(uf_coord
, key
);
476 rh
.coord
= coord_by_uf_coord(uf_coord
);
477 rh
.lh
= uf_coord
->lh
;
480 coord
= coord_by_uf_coord(uf_coord
);
481 ext_coord
= ext_coord_by_uf_coord(uf_coord
);
482 ext
= ext_by_ext_coord(uf_coord
);
484 width
= ext_coord
->width
;
485 pos_in_unit
= ext_coord
->pos_in_unit
;
/* hole of width 1: convert the unit in place, no item surgery needed */
489 reiser4_set_extent(ext
, UNALLOCATED_EXTENT_START
, 1);
490 znode_make_dirty(coord
->node
);
491 /* update uf_coord */
492 ON_DEBUG(ext_coord
->extent
= *ext
);
495 } else if (pos_in_unit
== 0) {
496 /* we deal with first element of extent */
497 if (coord
->unit_pos
) {
498 /* there is an extent to the left */
499 if (state_of_extent(ext
- 1) == UNALLOCATED_EXTENT
) {
501 * left neighboring unit is an unallocated
502 * extent. Increase its width and decrease
505 extent_set_width(ext
- 1,
506 extent_get_width(ext
- 1) + 1);
507 extent_set_width(ext
, width
- 1);
508 znode_make_dirty(coord
->node
);
510 /* update coord extension */
512 ext_coord
->width
= extent_get_width(ext
- 1);
513 ext_coord
->pos_in_unit
= ext_coord
->width
- 1;
514 ext_coord
->ext_offset
-= sizeof(reiser4_extent
);
515 ON_DEBUG(ext_coord
->extent
=
516 *extent_by_coord(coord
));
/* no mergeable neighbor: split hole into [unalloc(1), hole(width-1)] */
521 /* extent for replace */
522 reiser4_set_extent(&rh
.overwrite
, UNALLOCATED_EXTENT_START
, 1);
523 /* extent to be inserted */
524 reiser4_set_extent(&rh
.new_extents
[0], HOLE_EXTENT_START
,
526 rh
.nr_new_extents
= 1;
528 /* have reiser4_replace_extent to return with @coord and
529 @uf_coord->lh set to unit which was replaced */
530 return_inserted_position
= 0;
532 } else if (pos_in_unit
== width
- 1) {
533 /* we deal with last element of extent */
534 if (coord
->unit_pos
< nr_units_extent(coord
) - 1) {
535 /* there is an extent unit to the right */
536 if (state_of_extent(ext
+ 1) == UNALLOCATED_EXTENT
) {
538 * right neighboring unit is an unallocated
539 * extent. Increase its width and decrease
542 extent_set_width(ext
+ 1,
543 extent_get_width(ext
+ 1) + 1);
544 extent_set_width(ext
, width
- 1);
545 znode_make_dirty(coord
->node
);
547 /* update coord extension */
549 ext_coord
->width
= extent_get_width(ext
+ 1);
550 ext_coord
->pos_in_unit
= 0;
551 ext_coord
->ext_offset
+= sizeof(reiser4_extent
);
552 ON_DEBUG(ext_coord
->extent
=
553 *extent_by_coord(coord
));
/* no mergeable neighbor: split into [hole(width-1), unalloc(1)] */
558 /* extent for replace */
559 reiser4_set_extent(&rh
.overwrite
, HOLE_EXTENT_START
, width
- 1);
560 /* extent to be inserted */
561 reiser4_set_extent(&rh
.new_extents
[0], UNALLOCATED_EXTENT_START
,
563 rh
.nr_new_extents
= 1;
565 /* have reiser4_replace_extent to return with @coord and
566 @uf_coord->lh set to unit which was inserted */
567 return_inserted_position
= 1;
/* middle of the hole: split into [hole(pos), unalloc(1), hole(rest)] */
570 /* extent for replace */
571 reiser4_set_extent(&rh
.overwrite
, HOLE_EXTENT_START
,
573 /* extents to be inserted */
574 reiser4_set_extent(&rh
.new_extents
[0], UNALLOCATED_EXTENT_START
,
576 reiser4_set_extent(&rh
.new_extents
[1], HOLE_EXTENT_START
,
577 width
- pos_in_unit
- 1);
578 rh
.nr_new_extents
= 2;
580 /* have reiser4_replace_extent to return with @coord and
581 @uf_coord->lh set to first of units which were inserted */
582 return_inserted_position
= 1;
/* paste key = key of the first byte after the overwritten extent */
585 unit_key_by_coord(coord
, &rh
.paste_key
);
586 set_key_offset(&rh
.paste_key
, get_key_offset(&rh
.paste_key
) +
587 extent_get_width(&rh
.overwrite
) * current_blocksize
);
590 return reiser4_replace_extent(&rh
, return_inserted_position
);
/*
 * overwrite_one_block - assign a block number to jnode @node that is being
 * overwritten: for an allocated extent, the on-disk block; for a hole,
 * plug the hole (quota-charge one block, plug_hole(), fake block number)
 * and optionally report via @hole_plugged.
 * NOTE(review): garbled fragment — braces, result/how declarations, the
 * HOLE_EXTENT case label and error returns appear missing. Tokens
 * untouched.
 */
594 * overwrite_one_block -
599 * If @node corresponds to hole extent - create unallocated extent for it and
600 * assign fake block number. If @node corresponds to allocated extent - assign
601 * block number of jnode
603 static int overwrite_one_block(uf_coord_t
*uf_coord
, const reiser4_key
*key
,
604 jnode
*node
, int *hole_plugged
)
607 struct extent_coord_extension
*ext_coord
;
609 reiser4_block_nr block
;
612 assert("vs-1312", uf_coord
->coord
.between
== AT_UNIT
);
615 ext_coord
= ext_coord_by_uf_coord(uf_coord
);
616 ext
= ext_by_ext_coord(uf_coord
);
617 assert("", state_of_extent(ext
) != UNALLOCATED_EXTENT
);
619 switch (state_of_extent(ext
)) {
620 case ALLOCATED_EXTENT
:
621 block
= extent_get_start(ext
) + ext_coord
->pos_in_unit
;
/* hole case: charge quota, plug the hole, take a fake block number */
625 result
= DQUOT_ALLOC_BLOCK_NODIRTY(mapping_jnode(node
)->host
, 1);
627 result
= plug_hole(uf_coord
, key
, &how
);
630 block
= fake_blocknr_unformatted(1);
633 JF_SET(node
, JNODE_CREATED
);
640 jnode_set_block(node
, &block
);
/*
 * move_coord - advance an extended coordinate by one data block: first
 * within the current unit (pos_in_unit), then to the next unit of the
 * item. Returns 1 when the end of the item is reached or the coord is
 * invalid; 0 otherwise.
 * NOTE(review): garbled fragment — return statements and the width
 * reassignment target in the next-unit branch appear missing. Tokens
 * untouched.
 */
645 * move_coord - move coordinate forward
648 * Move coordinate one data block pointer forward. Return 1 if coord is set to
649 * the last one already or is invalid.
651 static int move_coord(uf_coord_t
*uf_coord
)
653 struct extent_coord_extension
*ext_coord
;
655 if (uf_coord
->valid
== 0)
657 ext_coord
= &uf_coord
->extension
.extent
;
658 ext_coord
->pos_in_unit
++;
659 if (ext_coord
->pos_in_unit
< ext_coord
->width
)
660 /* coordinate moved within the unit */
663 /* end of unit is reached. Try to move to next unit */
664 ext_coord
->pos_in_unit
= 0;
665 uf_coord
->coord
.unit_pos
++;
666 if (uf_coord
->coord
.unit_pos
< ext_coord
->nr_units
) {
667 /* coordinate moved to next unit */
668 ext_coord
->ext_offset
+= sizeof(reiser4_extent
);
/* refresh cached width (and, in debug builds, the extent copy) */
670 extent_get_width(ext_by_offset
671 (uf_coord
->coord
.node
,
672 ext_coord
->ext_offset
));
673 ON_DEBUG(ext_coord
->extent
=
674 *ext_by_offset(uf_coord
->coord
.node
,
675 ext_coord
->ext_offset
));
678 /* end of item is reached */
678 /* end of item is reached */
687 * Returns number of handled jnodes.
689 static int overwrite_extent(uf_coord_t
*uf_coord
, const reiser4_key
*key
,
690 jnode
**jnodes
, int count
, int *plugged_hole
)
698 for (i
= 0; i
< count
; i
++) {
700 if (*jnode_get_block(node
) == 0) {
701 result
= overwrite_one_block(uf_coord
, &k
, node
, plugged_hole
);
706 * make sure that we hold long term locked twig node containing
707 * all jnodes we are about to capture
709 check_jnodes(uf_coord
->lh
->node
, &k
, 1);
711 * assign fake block numbers to all jnodes, capture and mark
714 spin_lock_jnode(node
);
715 result
= reiser4_try_capture(node
, ZNODE_WRITE_LOCK
, 0);
717 jnode_make_dirty_locked(node
);
718 spin_unlock_jnode(node
);
720 if (uf_coord
->valid
== 0)
723 check_uf_coord(uf_coord
, &k
);
725 if (move_coord(uf_coord
)) {
727 * failed to move to the next node pointer. Either end
728 * of file or end of twig node is reached. In the later
729 * case we might go to the right neighbor.
734 set_key_offset(&k
, get_key_offset(&k
) + PAGE_CACHE_SIZE
);
/*
 * reiser4_update_extent - make sure one page (jnode @node) at file offset
 * @pos is addressed by an extent unit: look the key up, then append the
 * last item, overwrite an existing unit, or insert the first item of the
 * file depending on where the lookup landed. Returns 0 on success (the
 * helpers return number of jnodes handled, expected to be 1) or a
 * negative error.
 * NOTE(review): garbled fragment — local declarations (key, uf_coord, lh,
 * coord, loaded, result), zrelse/done_lh cleanup and several returns
 * appear missing. Tokens untouched.
 */
741 * reiser4_update_extent
748 int reiser4_update_extent(struct inode
*inode
, jnode
*node
, loff_t pos
,
758 assert("", reiser4_lock_counters()->d_refs
== 0);
760 key_by_inode_and_offset_common(inode
, pos
, &key
);
762 init_uf_coord(&uf_coord
, &lh
);
763 coord
= &uf_coord
.coord
;
764 result
= find_file_item_nohint(coord
, &lh
, &key
,
765 ZNODE_WRITE_LOCK
, inode
);
766 if (IS_CBKERR(result
)) {
767 assert("", reiser4_lock_counters()->d_refs
== 0);
771 result
= zload(coord
->node
);
773 loaded
= coord
->node
;
775 if (coord
->between
== AFTER_UNIT
) {
777 * append existing extent item with unallocated extent of width
780 init_coord_extension_extent(&uf_coord
,
781 get_key_offset(&key
));
782 result
= append_last_extent(&uf_coord
, &key
,
784 } else if (coord
->between
== AT_UNIT
) {
787 * not optimal yet. Will be optimized if new write will show
790 init_coord_extension_extent(&uf_coord
,
791 get_key_offset(&key
));
792 result
= overwrite_extent(&uf_coord
, &key
,
793 &node
, 1, plugged_hole
);
796 * there are no items of this file in the tree yet. Create
797 * first item of the file inserting one unallocated extent of
800 result
= insert_first_extent(&uf_coord
, &key
, &node
, 1, inode
);
/* helpers report jnode counts; exactly 1 expected here */
802 assert("", result
== 1 || result
< 0);
805 assert("", reiser4_lock_counters()->d_refs
== 0);
806 return (result
== 1) ? 0 : result
;
/*
 * update_extents - like reiser4_update_extent but driven by a file hint
 * and operating on @count jnodes in a loop: repeatedly find the file
 * item for the current key, dispatch to append_last_extent /
 * overwrite_extent / insert_first_extent, advance the key by the number
 * of jnodes handled, and finally save the hint. @count == 0 is the
 * expanding-truncate special case.
 * NOTE(review): garbled fragment — hint/key/result declarations, the
 * surrounding do/while loop and error paths appear missing. Tokens
 * untouched.
 */
817 static int update_extents(struct file
*file
, struct inode
*inode
,
818 jnode
**jnodes
, int count
, loff_t pos
)
825 result
= load_file_hint(file
, &hint
);
830 * count == 0 is special case: expanding truncate
832 pos
= (loff_t
)index_jnode(jnodes
[0]) << PAGE_CACHE_SHIFT
;
833 key_by_inode_and_offset_common(inode
, pos
, &key
);
835 assert("", reiser4_lock_counters()->d_refs
== 0);
838 result
= find_file_item(&hint
, &key
, ZNODE_WRITE_LOCK
, inode
);
839 if (IS_CBKERR(result
)) {
840 assert("", reiser4_lock_counters()->d_refs
== 0);
844 result
= zload(hint
.ext_coord
.coord
.node
);
846 loaded
= hint
.ext_coord
.coord
.node
;
848 if (hint
.ext_coord
.coord
.between
== AFTER_UNIT
) {
850 * append existing extent item with unallocated extent
853 if (hint
.ext_coord
.valid
== 0)
854 /* NOTE: get statistics on this */
855 init_coord_extension_extent(&hint
.ext_coord
,
856 get_key_offset(&key
));
857 result
= append_last_extent(&hint
.ext_coord
, &key
,
859 } else if (hint
.ext_coord
.coord
.between
== AT_UNIT
) {
862 * not optimal yet. Will be optimized if new write will
863 * show performance win.
865 if (hint
.ext_coord
.valid
== 0)
866 /* NOTE: get statistics on this */
867 init_coord_extension_extent(&hint
.ext_coord
,
868 get_key_offset(&key
));
869 result
= overwrite_extent(&hint
.ext_coord
, &key
,
870 jnodes
, count
, NULL
);
873 * there are no items of this file in the tree
874 * yet. Create first item of the file inserting one
875 * unallocated extent of * width nr_jnodes
877 result
= insert_first_extent(&hint
.ext_coord
, &key
,
878 jnodes
, count
, inode
);
882 done_lh(hint
.ext_coord
.lh
);
/* result > 0 = number of jnodes handled; advance the key accordingly */
888 set_key_offset(&key
, get_key_offset(&key
) + result
* PAGE_CACHE_SIZE
);
890 /* seal and unlock znode */
891 if (hint
.ext_coord
.valid
)
892 reiser4_set_hint(&hint
, &key
, ZNODE_WRITE_LOCK
);
894 reiser4_unset_hint(&hint
);
898 save_file_hint(file
, &hint
);
899 assert("", reiser4_lock_counters()->d_refs
== 0);
/*
 * write_extent_reserve_space - grab disk space for writing up to
 * WRITE_GRANULARITY pages by extents: one item insertion (possible empty
 * leaf between extent items), per-page extent insert/paste estimates,
 * and one more item insertion for the stat-data update.
 * NOTE(review): garbled fragment — declarations of tree/count and braces
 * appear missing. Tokens untouched.
 */
904 * write_extent_reserve_space - reserve space for extent write operation
907 * Estimates and reserves space which may be required for writing
908 * WRITE_GRANULARITY pages of file.
910 static int write_extent_reserve_space(struct inode
*inode
)
916 * to write WRITE_GRANULARITY pages to a file by extents we have to
917 * reserve disk space for:
919 * 1. find_file_item may have to insert empty node to the tree (empty
920 * leaf node between two extent items). This requires 1 block and
921 * number of blocks which are necessary to perform insertion of an
922 * internal item into twig level.
924 * 2. for each of written pages there might be needed 1 block and
925 * number of blocks which might be necessary to perform insertion of or
926 * paste to an extent item.
928 * 3. stat data update
930 tree
= reiser4_tree_by_inode(inode
);
931 count
= estimate_one_insert_item(tree
) +
932 WRITE_GRANULARITY
* (1 + estimate_one_insert_into_item(tree
)) +
933 estimate_one_insert_item(tree
);
935 return reiser4_grab_space(count
, 0 /* flags */);
/*
 * filemap_copy_from_user - copy @bytes of user data into @page at
 * @offset: try an atomic (non-faulting) copy first; if that leaves bytes
 * uncopied, fall back to a regular kmap + copy that may fault.
 * NOTE(review): garbled fragment — the "if (left)" fallback condition,
 * the second kmap/kunmap pair and the return appear missing. Tokens
 * untouched.
 */
939 * filemap_copy_from_user no longer exists in generic code, because it
940 * is deadlocky (copying from user while holding the page lock is bad).
941 * As a temporary fix for reiser4, just define it here.
944 filemap_copy_from_user(struct page
*page
, unsigned long offset
,
945 const char __user
*buf
, unsigned bytes
)
950 kaddr
= kmap_atomic(page
, KM_USER0
);
951 left
= __copy_from_user_inatomic_nocache(kaddr
+ offset
, buf
, bytes
);
952 kunmap_atomic(kaddr
, KM_USER0
);
955 /* Do it the slow way */
957 left
= __copy_from_user_nocache(kaddr
+ offset
, buf
, bytes
);
/*
 * reiser4_write_extent - extent plugin write method: reserve space, grab
 * pages+jnodes for the target range, copy user data into the pages
 * (reading partially-overwritten pages first), then insert/extend extent
 * units for pages that have no block yet and capture+dirty the jnodes.
 * Returns number of bytes written or a negative error.
 * NOTE(review): garbled fragment — many locals (result, index, end, i,
 * page, kaddr), loop braces, error-path cleanup and the truncated-copy
 * handling appear missing. Tokens untouched.
 */
964 * reiser4_write_extent - write method of extent item plugin
965 * @file: file to write to
966 * @buf: address of user-space buffer
967 * @count: number of bytes to write
968 * @pos: position in file to write to
971 ssize_t
reiser4_write_extent(struct file
*file
, struct inode
* inode
,
972 const char __user
*buf
, size_t count
, loff_t
*pos
)
974 int have_to_update_extent
;
975 int nr_pages
, nr_dirty
;
977 jnode
*jnodes
[WRITE_GRANULARITY
+ 1];
981 int to_page
, page_off
;
982 size_t left
, written
;
985 if (write_extent_reserve_space(inode
))
986 return RETERR(-ENOSPC
);
/* count == 0: expanding truncate goes straight to update_extents */
990 update_extents(file
, inode
, jnodes
, 0, *pos
);
994 BUG_ON(get_current_context()->trans
->atom
!= NULL
);
997 index
= *pos
>> PAGE_CACHE_SHIFT
;
998 /* calculate number of pages which are to be written */
999 end
= ((*pos
+ count
- 1) >> PAGE_CACHE_SHIFT
);
1000 nr_pages
= end
- index
+ 1;
1002 assert("", nr_pages
<= WRITE_GRANULARITY
+ 1);
1004 /* get pages and jnodes */
1005 for (i
= 0; i
< nr_pages
; i
++) {
1006 page
= find_or_create_page(inode
->i_mapping
, index
+ i
,
1007 reiser4_ctx_gfp_mask_get());
1010 result
= RETERR(-ENOMEM
);
1014 jnodes
[i
] = jnode_of_page(page
);
1015 if (IS_ERR(jnodes
[i
])) {
1017 page_cache_release(page
);
1019 result
= RETERR(-ENOMEM
);
1022 /* prevent jnode and page from disconnecting */
1023 JF_SET(jnodes
[i
], JNODE_WRITE_PREPARED
);
1027 BUG_ON(get_current_context()->trans
->atom
!= NULL
);
1029 have_to_update_extent
= 0;
/* copy user data page by page */
1031 page_off
= (*pos
& (PAGE_CACHE_SIZE
- 1));
1032 for (i
= 0; i
< nr_pages
; i
++) {
1033 to_page
= PAGE_CACHE_SIZE
- page_off
;
1036 page
= jnode_page(jnodes
[i
]);
1037 if (page_offset(page
) < inode
->i_size
&&
1038 !PageUptodate(page
) && to_page
!= PAGE_CACHE_SIZE
) {
1040 * the above is not optimal for partial write to last
1041 * page of file when file size is not at boundary of
1045 if (!PageUptodate(page
)) {
1046 result
= readpage_unix_file(NULL
, page
);
1047 BUG_ON(result
!= 0);
1048 /* wait for read completion */
1050 BUG_ON(!PageUptodate(page
));
1056 BUG_ON(get_current_context()->trans
->atom
!= NULL
);
1057 fault_in_pages_readable(buf
, to_page
);
1058 BUG_ON(get_current_context()->trans
->atom
!= NULL
);
/* zero the parts of a partial page we will not overwrite */
1061 if (!PageUptodate(page
) && to_page
!= PAGE_CACHE_SIZE
)
1062 zero_user_segments(page
, 0, page_off
,
1066 written
= filemap_copy_from_user(page
, page_off
, buf
, to_page
);
1067 if (unlikely(written
!= to_page
)) {
1069 result
= RETERR(-EFAULT
);
1073 flush_dcache_page(page
);
1074 reiser4_set_page_dirty_internal(page
);
1078 mark_page_accessed(page
);
1079 SetPageUptodate(page
);
/* blocknr == 0: page still needs an extent unit */
1081 if (jnodes
[i
]->blocknr
== 0)
1082 have_to_update_extent
++;
1087 BUG_ON(get_current_context()->trans
->atom
!= NULL
);
1090 if (have_to_update_extent
) {
1091 update_extents(file
, inode
, jnodes
, nr_dirty
, *pos
);
/* pages already had blocks: just capture and dirty their jnodes */
1093 for (i
= 0; i
< nr_dirty
; i
++) {
1095 spin_lock_jnode(jnodes
[i
]);
1096 ret
= reiser4_try_capture(jnodes
[i
],
1097 ZNODE_WRITE_LOCK
, 0);
1099 jnode_make_dirty_locked(jnodes
[i
]);
1100 spin_unlock_jnode(jnodes
[i
]);
1104 for (i
= 0; i
< nr_pages
; i
++) {
1105 page_cache_release(jnode_page(jnodes
[i
]));
1106 JF_CLR(jnodes
[i
], JNODE_WRITE_PREPARED
);
1110 /* the only errors handled so far is ENOMEM and
1111 EFAULT on copy_from_user */
1113 return (count
- left
) ? (count
- left
) : result
;
/*
 * reiser4_do_readpage_extent - fill @page according to the state of
 * extent @ext at position @pos: holes are zero-filled (with care for a
 * previously-eflushed jnode), allocated extents get the on-disk block
 * number and real I/O, unallocated extents read via the existing jnode's
 * fake block. Ends by submitting reiser4_page_io for the I/O cases.
 * NOTE(review): garbled fragment — jnode j / oid declarations, HOLE case
 * label, several returns, unlocks and error paths appear missing.
 * Tokens untouched.
 */
1116 int reiser4_do_readpage_extent(reiser4_extent
* ext
, reiser4_block_nr pos
,
1120 struct address_space
*mapping
;
1121 unsigned long index
;
1123 reiser4_block_nr block
;
1125 mapping
= page
->mapping
;
1126 oid
= get_inode_oid(mapping
->host
);
1127 index
= page
->index
;
1129 switch (state_of_extent(ext
)) {
1132 * it is possible to have hole page with jnode, if page was
1133 * eflushed previously.
1135 j
= jfind(mapping
, index
);
/* plain hole, no jnode: just zero-fill the page */
1137 zero_user(page
, 0, PAGE_CACHE_SIZE
);
1138 SetPageUptodate(page
);
1143 if (!jnode_page(j
)) {
1144 jnode_attach_page(j
, page
);
1146 BUG_ON(jnode_page(j
) != page
);
1147 assert("vs-1504", jnode_page(j
) == page
);
1149 block
= *jnode_get_io_block(j
);
1150 spin_unlock_jnode(j
);
/* eflushed hole jnode with no io block: zero-fill as well */
1152 zero_user(page
, 0, PAGE_CACHE_SIZE
);
1153 SetPageUptodate(page
);
1160 case ALLOCATED_EXTENT
:
1161 j
= jnode_of_page(page
);
1164 if (*jnode_get_block(j
) == 0) {
1165 reiser4_block_nr blocknr
;
/* derive block number from extent start + position inside unit */
1167 blocknr
= extent_get_start(ext
) + pos
;
1168 jnode_set_block(j
, &blocknr
);
1171 j
->blocknr
== extent_get_start(ext
) + pos
);
1174 case UNALLOCATED_EXTENT
:
1175 j
= jfind(mapping
, index
);
1176 assert("nikita-2688", j
);
1177 assert("vs-1426", jnode_page(j
) == NULL
);
1180 jnode_attach_page(j
, page
);
1181 spin_unlock_jnode(j
);
1185 warning("vs-957", "wrong extent\n");
1186 return RETERR(-EIO
);
1190 reiser4_page_io(page
, j
, READ
, reiser4_ctx_gfp_mask_get());
/*
 * reiser4_read_extent - extent plugin read method: set a hint and drop
 * the twig lock, then loop page by page via read_mapping_page, copying
 * each page's relevant bytes to the user buffer (atomic copy with a
 * slow-path fallback) and advancing the flow.
 * NOTE(review): garbled fragment — locals (file_off, page, kaddr,
 * result), the do-loop opening, unlock/kmap fallback bracketing and the
 * final return appear missing. Tokens untouched.
 */
1195 /* Implements plugin->u.item.s.file.read operation for extent items. */
1196 int reiser4_read_extent(struct file
*file
, flow_t
*flow
, hint_t
*hint
)
1200 unsigned long cur_page
, next_page
;
1201 unsigned long page_off
, count
;
1202 struct address_space
*mapping
;
1204 uf_coord_t
*uf_coord
;
1206 struct extent_coord_extension
*ext_coord
;
1207 unsigned long nr_pages
;
1210 assert("vs-1353", current_blocksize
== PAGE_CACHE_SIZE
);
1211 assert("vs-572", flow
->user
== 1);
1212 assert("vs-1351", flow
->length
> 0);
1214 uf_coord
= &hint
->ext_coord
;
1216 check_uf_coord(uf_coord
, NULL
);
1217 assert("vs-33", uf_coord
->lh
== &hint
->lh
);
1219 coord
= &uf_coord
->coord
;
1220 assert("vs-1119", znode_is_rlocked(coord
->node
));
1221 assert("vs-1120", znode_is_loaded(coord
->node
));
1222 assert("vs-1256", coord_matches_key_extent(coord
, &flow
->key
));
1224 mapping
= file
->f_dentry
->d_inode
->i_mapping
;
1225 ext_coord
= &uf_coord
->extension
.extent
;
1227 /* offset in a file to start read from */
1228 file_off
= get_key_offset(&flow
->key
);
1229 /* offset within the page to start read from */
1230 page_off
= (unsigned long)(file_off
& (PAGE_CACHE_SIZE
- 1));
1231 /* bytes which can be read from the page which contains file_off */
1232 count
= PAGE_CACHE_SIZE
- page_off
;
1234 /* index of page containing offset read is to start from */
1235 cur_page
= (unsigned long)(file_off
>> PAGE_CACHE_SHIFT
);
1236 next_page
= cur_page
;
1237 /* number of pages flow spans over */
1239 ((file_off
+ flow
->length
+ PAGE_CACHE_SIZE
-
1240 1) >> PAGE_CACHE_SHIFT
) - cur_page
;
1242 /* we start having twig node read locked. However, we do not want to
1243 keep that lock all the time readahead works. So, set a sel and
1244 release twig node. */
1245 reiser4_set_hint(hint
, &flow
->key
, ZNODE_READ_LOCK
);
1246 /* &hint->lh is done-ed */
/* per-page read loop (do { ... } while (flow->length)) */
1249 reiser4_txn_restart_current();
1250 page
= read_mapping_page(mapping
, cur_page
, file
);
1252 return PTR_ERR(page
);
1254 if (!PageUptodate(page
)) {
1256 page_cache_release(page
);
1257 warning("jmacd-97178", "extent_read: page is not up to date");
1258 return RETERR(-EIO
);
1260 mark_page_accessed(page
);
1263 /* If users can be writing to this page using arbitrary virtual
1264 addresses, take care about potential aliasing before reading
1265 the page on the kernel side.
1267 if (mapping_writably_mapped(mapping
))
1268 flush_dcache_page(page
);
1270 assert("nikita-3034", reiser4_schedulable());
1272 /* number of bytes which are to be read from the page */
1273 if (count
> flow
->length
)
1274 count
= flow
->length
;
/* pre-fault user pages so the atomic copy is likely to succeed */
1276 result
= fault_in_pages_writeable(flow
->data
, count
);
1278 page_cache_release(page
);
1279 return RETERR(-EFAULT
);
1282 kaddr
= kmap_atomic(page
, KM_USER0
);
1283 result
= __copy_to_user_inatomic(flow
->data
,
1284 kaddr
+ page_off
, count
);
1285 kunmap_atomic(kaddr
, KM_USER0
);
/* slow path: copy that may fault/sleep */
1288 result
= __copy_to_user(flow
->data
, kaddr
+ page_off
, count
);
1290 if (unlikely(result
))
1291 return RETERR(-EFAULT
);
1294 page_cache_release(page
);
1296 /* increase key (flow->key), update user area pointer (flow->data) */
1297 move_flow_forward(flow
, count
);
/* subsequent pages are read from offset 0, full-page counts */
1301 count
= PAGE_CACHE_SIZE
;
1303 } while (flow
->length
);
/*
 * reiser4_readpage_extent - readpage entry point for extent items: after
 * debug checks, delegates to reiser4_do_readpage_extent with the extent
 * and pos_in_unit taken from the extended coord passed in @vp.
 * NOTE(review): garbled fragment — opening comment delimiters and braces
 * appear missing. Tokens untouched.
 */
1309 plugin->s.file.readpage
1310 reiser4_read->unix_file_read->page_cache_readahead->reiser4_readpage->unix_file_readpage->extent_readpage
1312 filemap_fault->reiser4_readpage->readpage_unix_file->->readpage_extent
1314 At the beginning: coord->node is read locked, zloaded, page is
1315 locked, coord is set to existing unit inside of extent item (it is not necessary that coord matches to page->index)
1317 int reiser4_readpage_extent(void *vp
, struct page
*page
)
1319 uf_coord_t
*uf_coord
= vp
;
1320 ON_DEBUG(coord_t
* coord
= &uf_coord
->coord
);
1321 ON_DEBUG(reiser4_key key
);
1323 assert("vs-1040", PageLocked(page
));
1324 assert("vs-1050", !PageUptodate(page
));
1325 assert("vs-1039", page
->mapping
&& page
->mapping
->host
);
1327 assert("vs-1044", znode_is_loaded(coord
->node
));
1328 assert("vs-758", item_is_extent(coord
));
1329 assert("vs-1046", coord_is_existing_unit(coord
));
1330 assert("vs-1045", znode_is_rlocked(coord
->node
));
1332 page
->mapping
->host
->i_ino
==
1333 get_key_objectid(item_key_by_coord(coord
, &key
)));
1334 check_uf_coord(uf_coord
, NULL
);
1336 return reiser4_do_readpage_extent(
1337 ext_by_ext_coord(uf_coord
),
1338 uf_coord
->extension
.extent
.pos_in_unit
, page
);
1342 * get_block_address_extent
1349 int get_block_address_extent(const coord_t
*coord
, sector_t block
,
1352 reiser4_extent
*ext
;
1354 if (!coord_is_existing_unit(coord
))
1355 return RETERR(-EINVAL
);
1357 ext
= extent_by_coord(coord
);
1359 if (state_of_extent(ext
) != ALLOCATED_EXTENT
)
1360 /* FIXME: bad things may happen if it is unallocated extent */
1365 unit_key_by_coord(coord
, &key
);
1367 block
>= get_key_offset(&key
) >> current_blocksize_bits
);
1370 (get_key_offset(&key
) >> current_blocksize_bits
) +
1371 extent_get_width(ext
));
1373 extent_get_start(ext
) + (block
-
1374 (get_key_offset(&key
) >>
1375 current_blocksize_bits
));
/*
 * append_key_extent - compute the append key of an extent item: the item
 * key with its offset advanced by the total byte size of the item
 * (reiser4_extent_size). Asserted to be non-zero and block-aligned.
 * NOTE(review): garbled fragment — set_key_offset wrapper, the
 * nr_units argument to reiser4_extent_size and the return appear
 * missing. Tokens untouched.
 */
1381 plugin->u.item.s.file.append_key
1382 key of first byte which is the next to last byte by addressed by this extent
1384 reiser4_key
*append_key_extent(const coord_t
* coord
, reiser4_key
* key
)
1386 item_key_by_coord(coord
, key
);
1388 get_key_offset(key
) + reiser4_extent_size(coord
,
1392 assert("vs-610", get_key_offset(key
)
1393 && (get_key_offset(key
) & (current_blocksize
- 1)) == 0);
/*
 * init_coord_extension_extent - populate the extent part of @uf_coord's
 * extension (nr_units, ext_offset, width, pos_in_unit) from the on-node
 * item, and mark the coord valid. For AFTER_UNIT the position is the
 * last block of the last unit; for AT_UNIT it is derived from @lookuped
 * relative to the unit's key offset.
 * NOTE(review): garbled fragment — key/offset declarations, braces,
 * returns and the AT_UNIT upper-bound assert appear partially missing.
 * Tokens untouched.
 */
1397 /* plugin->u.item.s.file.init_coord_extension */
1398 void init_coord_extension_extent(uf_coord_t
* uf_coord
, loff_t lookuped
)
1401 struct extent_coord_extension
*ext_coord
;
1405 assert("vs-1295", uf_coord
->valid
== 0);
1407 coord
= &uf_coord
->coord
;
1408 assert("vs-1288", coord_is_iplug_set(coord
));
1409 assert("vs-1327", znode_is_loaded(coord
->node
));
1411 if (coord
->between
!= AFTER_UNIT
&& coord
->between
!= AT_UNIT
)
1414 ext_coord
= &uf_coord
->extension
.extent
;
1415 ext_coord
->nr_units
= nr_units_extent(coord
);
1416 ext_coord
->ext_offset
=
1417 (char *)extent_by_coord(coord
) - zdata(coord
->node
);
1418 ext_coord
->width
= extent_get_width(extent_by_coord(coord
));
1419 ON_DEBUG(ext_coord
->extent
= *extent_by_coord(coord
));
1420 uf_coord
->valid
= 1;
1422 /* pos_in_unit is the only uninitialized field in extended coord */
1423 if (coord
->between
== AFTER_UNIT
) {
1425 coord
->unit_pos
== nr_units_extent(coord
) - 1);
1427 ext_coord
->pos_in_unit
= ext_coord
->width
- 1;
/* AT_UNIT: locate @lookuped inside the unit's block range */
1430 unit_key_by_coord(coord
, &key
);
1431 offset
= get_key_offset(&key
);
1433 assert("vs-1328", offset
<= lookuped
);
1436 offset
+ ext_coord
->width
* current_blocksize
);
1437 ext_coord
->pos_in_unit
=
1438 ((lookuped
- offset
) >> current_blocksize_bits
);