ERROR: code indent should use tabs where possible
[mmotm.git] / fs / reiser4 / plugin / item / extent_file_ops.c
blobeae05e09343ea7dfc0ebbfb338cb0a7cbcdb1cb3
1 /* COPYRIGHT 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
3 #include "item.h"
4 #include "../../inode.h"
5 #include "../../page_cache.h"
6 #include "../object.h"
8 #include <linux/quotaops.h>
9 #include <linux/swap.h>
11 static inline reiser4_extent *ext_by_offset(const znode *node, int offset)
13 reiser4_extent *ext;
15 ext = (reiser4_extent *) (zdata(node) + offset);
16 return ext;
19 /**
20 * check_uf_coord - verify coord extension
21 * @uf_coord:
22 * @key:
24 * Makes sure that all fields of @uf_coord are set properly. If @key is
25 * specified - check whether @uf_coord is set correspondingly.
27 static void check_uf_coord(const uf_coord_t *uf_coord, const reiser4_key *key)
29 #if REISER4_DEBUG
30 const coord_t *coord;
31 const struct extent_coord_extension *ext_coord;
32 reiser4_extent *ext;
34 coord = &uf_coord->coord;
35 ext_coord = &uf_coord->extension.extent;
36 ext = ext_by_offset(coord->node, uf_coord->extension.extent.ext_offset);
38 assert("",
39 WITH_DATA(coord->node,
40 (uf_coord->valid == 1 &&
41 coord_is_iplug_set(coord) &&
42 item_is_extent(coord) &&
43 ext_coord->nr_units == nr_units_extent(coord) &&
44 ext == extent_by_coord(coord) &&
45 ext_coord->width == extent_get_width(ext) &&
46 coord->unit_pos < ext_coord->nr_units &&
47 ext_coord->pos_in_unit < ext_coord->width &&
48 memcmp(ext, &ext_coord->extent,
49 sizeof(reiser4_extent)) == 0)));
50 if (key) {
51 reiser4_key coord_key;
53 unit_key_by_coord(&uf_coord->coord, &coord_key);
54 set_key_offset(&coord_key,
55 get_key_offset(&coord_key) +
56 (uf_coord->extension.extent.
57 pos_in_unit << PAGE_CACHE_SHIFT));
58 assert("", keyeq(key, &coord_key));
60 #endif
63 static inline reiser4_extent *ext_by_ext_coord(const uf_coord_t *uf_coord)
65 check_uf_coord(uf_coord, NULL);
67 return ext_by_offset(uf_coord->coord.node,
68 uf_coord->extension.extent.ext_offset);
#if REISER4_DEBUG

/*
 * offset_is_in_unit - return 1 if file offset @off lies inside the extent
 * unit pointed to by @coord, 0 otherwise.
 */
static int offset_is_in_unit(const coord_t *coord, loff_t off)
{
	reiser4_key unit_key;
	__u64 unit_off;
	reiser4_extent *ext;

	ext = extent_by_coord(coord);

	unit_key_extent(coord, &unit_key);
	unit_off = get_key_offset(&unit_key);
	if (off < unit_off)
		return 0;
	if (off >= (unit_off + (current_blocksize * extent_get_width(ext))))
		return 0;
	return 1;
}

/*
 * coord_matches_key_extent - debug helper: assert @key falls inside the
 * item @coord is set to and return whether it falls inside @coord's unit.
 */
static int
coord_matches_key_extent(const coord_t * coord, const reiser4_key * key)
{
	reiser4_key item_key;

	assert("vs-771", coord_is_existing_unit(coord));
	assert("vs-1258", keylt(key, append_key_extent(coord, &item_key)));
	assert("vs-1259", keyge(key, item_key_by_coord(coord, &item_key)));

	return offset_is_in_unit(coord, get_key_offset(key));
}

#endif
113 * can_append -
114 * @key:
115 * @coord:
117 * Returns 1 if @key is equal to an append key of item @coord is set to
119 static int can_append(const reiser4_key *key, const coord_t *coord)
121 reiser4_key append_key;
123 return keyeq(key, append_key_extent(coord, &append_key));
127 * append_hole
128 * @coord:
129 * @lh:
130 * @key:
133 static int append_hole(coord_t *coord, lock_handle *lh,
134 const reiser4_key *key)
136 reiser4_key append_key;
137 reiser4_block_nr hole_width;
138 reiser4_extent *ext, new_ext;
139 reiser4_item_data idata;
141 /* last item of file may have to be appended with hole */
142 assert("vs-708", znode_get_level(coord->node) == TWIG_LEVEL);
143 assert("vs-714", item_id_by_coord(coord) == EXTENT_POINTER_ID);
145 /* key of first byte which is not addressed by this extent */
146 append_key_extent(coord, &append_key);
148 assert("", keyle(&append_key, key));
151 * extent item has to be appended with hole. Calculate length of that
152 * hole
154 hole_width = ((get_key_offset(key) - get_key_offset(&append_key) +
155 current_blocksize - 1) >> current_blocksize_bits);
156 assert("vs-954", hole_width > 0);
158 /* set coord after last unit */
159 coord_init_after_item_end(coord);
161 /* get last extent in the item */
162 ext = extent_by_coord(coord);
163 if (state_of_extent(ext) == HOLE_EXTENT) {
165 * last extent of a file is hole extent. Widen that extent by
166 * @hole_width blocks. Note that we do not worry about
167 * overflowing - extent width is 64 bits
169 reiser4_set_extent(ext, HOLE_EXTENT_START,
170 extent_get_width(ext) + hole_width);
171 znode_make_dirty(coord->node);
172 return 0;
175 /* append last item of the file with hole extent unit */
176 assert("vs-713", (state_of_extent(ext) == ALLOCATED_EXTENT ||
177 state_of_extent(ext) == UNALLOCATED_EXTENT));
179 reiser4_set_extent(&new_ext, HOLE_EXTENT_START, hole_width);
180 init_new_extent(&idata, &new_ext, 1);
181 return insert_into_item(coord, lh, &append_key, &idata, 0);
185 * check_jnodes
186 * @twig: longterm locked twig node
187 * @key:
190 static void check_jnodes(znode *twig, const reiser4_key *key, int count)
192 #if REISER4_DEBUG
193 coord_t c;
194 reiser4_key node_key, jnode_key;
196 jnode_key = *key;
198 assert("", twig != NULL);
199 assert("", znode_get_level(twig) == TWIG_LEVEL);
200 assert("", znode_is_write_locked(twig));
202 zload(twig);
203 /* get the smallest key in twig node */
204 coord_init_first_unit(&c, twig);
205 unit_key_by_coord(&c, &node_key);
206 assert("", keyle(&node_key, &jnode_key));
208 coord_init_last_unit(&c, twig);
209 unit_key_by_coord(&c, &node_key);
210 if (item_plugin_by_coord(&c)->s.file.append_key)
211 item_plugin_by_coord(&c)->s.file.append_key(&c, &node_key);
212 set_key_offset(&jnode_key,
213 get_key_offset(&jnode_key) + (loff_t)count * PAGE_CACHE_SIZE - 1);
214 assert("", keylt(&jnode_key, &node_key));
215 zrelse(twig);
216 #endif
220 * append_last_extent - append last file item
221 * @uf_coord: coord to start insertion from
222 * @jnodes: array of jnodes
223 * @count: number of jnodes in the array
225 * There is already at least one extent item of file @inode in the tree. Append
226 * the last of them with unallocated extent unit of width @count. Assign
227 * fake block numbers to jnodes corresponding to the inserted extent.
229 static int append_last_extent(uf_coord_t *uf_coord, const reiser4_key *key,
230 jnode **jnodes, int count)
232 int result;
233 reiser4_extent new_ext;
234 reiser4_item_data idata;
235 coord_t *coord;
236 struct extent_coord_extension *ext_coord;
237 reiser4_extent *ext;
238 reiser4_block_nr block;
239 jnode *node;
240 int i;
242 coord = &uf_coord->coord;
243 ext_coord = &uf_coord->extension.extent;
244 ext = ext_by_ext_coord(uf_coord);
246 /* check correctness of position in the item */
247 assert("vs-228", coord->unit_pos == coord_last_unit_pos(coord));
248 assert("vs-1311", coord->between == AFTER_UNIT);
249 assert("vs-1302", ext_coord->pos_in_unit == ext_coord->width - 1);
251 if (!can_append(key, coord)) {
252 /* hole extent has to be inserted */
253 result = append_hole(coord, uf_coord->lh, key);
254 uf_coord->valid = 0;
255 return result;
258 if (count == 0)
259 return 0;
261 assert("", get_key_offset(key) == (loff_t)index_jnode(jnodes[0]) * PAGE_CACHE_SIZE);
263 result = DQUOT_ALLOC_BLOCK_NODIRTY(mapping_jnode(jnodes[0])->host,
264 count);
265 BUG_ON(result != 0);
267 switch (state_of_extent(ext)) {
268 case UNALLOCATED_EXTENT:
270 * last extent unit of the file is unallocated one. Increase
271 * its width by @count
273 reiser4_set_extent(ext, UNALLOCATED_EXTENT_START,
274 extent_get_width(ext) + count);
275 znode_make_dirty(coord->node);
277 /* update coord extension */
278 ext_coord->width += count;
279 ON_DEBUG(extent_set_width
280 (&uf_coord->extension.extent.extent,
281 ext_coord->width));
282 break;
284 case HOLE_EXTENT:
285 case ALLOCATED_EXTENT:
287 * last extent unit of the file is either hole or allocated
288 * one. Append one unallocated extent of width @count
290 reiser4_set_extent(&new_ext, UNALLOCATED_EXTENT_START, count);
291 init_new_extent(&idata, &new_ext, 1);
292 result = insert_into_item(coord, uf_coord->lh, key, &idata, 0);
293 uf_coord->valid = 0;
294 if (result)
295 return result;
296 break;
298 default:
299 return RETERR(-EIO);
303 * make sure that we hold long term locked twig node containing all
304 * jnodes we are about to capture
306 check_jnodes(uf_coord->lh->node, key, count);
309 * assign fake block numbers to all jnodes. FIXME: make sure whether
310 * twig node containing inserted extent item is locked
312 block = fake_blocknr_unformatted(count);
313 for (i = 0; i < count; i ++, block ++) {
314 node = jnodes[i];
315 spin_lock_jnode(node);
316 JF_SET(node, JNODE_CREATED);
317 jnode_set_block(node, &block);
318 result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0);
319 BUG_ON(result != 0);
320 jnode_make_dirty_locked(node);
321 spin_unlock_jnode(node);
323 return count;
327 * insert_first_hole - inser hole extent into tree
328 * @coord:
329 * @lh:
330 * @key:
334 static int insert_first_hole(coord_t *coord, lock_handle *lh,
335 const reiser4_key *key)
337 reiser4_extent new_ext;
338 reiser4_item_data idata;
339 reiser4_key item_key;
340 reiser4_block_nr hole_width;
342 /* @coord must be set for inserting of new item */
343 assert("vs-711", coord_is_between_items(coord));
345 item_key = *key;
346 set_key_offset(&item_key, 0ull);
348 hole_width = ((get_key_offset(key) + current_blocksize - 1) >>
349 current_blocksize_bits);
350 assert("vs-710", hole_width > 0);
352 /* compose body of hole extent and insert item into tree */
353 reiser4_set_extent(&new_ext, HOLE_EXTENT_START, hole_width);
354 init_new_extent(&idata, &new_ext, 1);
355 return insert_extent_by_coord(coord, &idata, &item_key, lh);
360 * insert_first_extent - insert first file item
361 * @inode: inode of file
362 * @uf_coord: coord to start insertion from
363 * @jnodes: array of jnodes
364 * @count: number of jnodes in the array
365 * @inode:
367 * There are no items of file @inode in the tree yet. Insert unallocated extent
368 * of width @count into tree or hole extent if writing not to the
369 * beginning. Assign fake block numbers to jnodes corresponding to the inserted
370 * unallocated extent. Returns number of jnodes or error code.
372 static int insert_first_extent(uf_coord_t *uf_coord, const reiser4_key *key,
373 jnode **jnodes, int count,
374 struct inode *inode)
376 int result;
377 int i;
378 reiser4_extent new_ext;
379 reiser4_item_data idata;
380 reiser4_block_nr block;
381 struct unix_file_info *uf_info;
382 jnode *node;
384 /* first extent insertion starts at leaf level */
385 assert("vs-719", znode_get_level(uf_coord->coord.node) == LEAF_LEVEL);
386 assert("vs-711", coord_is_between_items(&uf_coord->coord));
388 if (get_key_offset(key) != 0) {
389 result = insert_first_hole(&uf_coord->coord, uf_coord->lh, key);
390 uf_coord->valid = 0;
391 uf_info = unix_file_inode_data(inode);
394 * first item insertion is only possible when writing to empty
395 * file or performing tail conversion
397 assert("", (uf_info->container == UF_CONTAINER_EMPTY ||
398 (reiser4_inode_get_flag(inode,
399 REISER4_PART_MIXED) &&
400 reiser4_inode_get_flag(inode,
401 REISER4_PART_IN_CONV))));
402 /* if file was empty - update its state */
403 if (result == 0 && uf_info->container == UF_CONTAINER_EMPTY)
404 uf_info->container = UF_CONTAINER_EXTENTS;
405 return result;
408 if (count == 0)
409 return 0;
411 result = DQUOT_ALLOC_BLOCK_NODIRTY(mapping_jnode(jnodes[0])->host, count);
412 BUG_ON(result != 0);
415 * prepare for tree modification: compose body of item and item data
416 * structure needed for insertion
418 reiser4_set_extent(&new_ext, UNALLOCATED_EXTENT_START, count);
419 init_new_extent(&idata, &new_ext, 1);
421 /* insert extent item into the tree */
422 result = insert_extent_by_coord(&uf_coord->coord, &idata, key,
423 uf_coord->lh);
424 if (result)
425 return result;
428 * make sure that we hold long term locked twig node containing all
429 * jnodes we are about to capture
431 check_jnodes(uf_coord->lh->node, key, count);
433 * assign fake block numbers to all jnodes, capture and mark them dirty
435 block = fake_blocknr_unformatted(count);
436 for (i = 0; i < count; i ++, block ++) {
437 node = jnodes[i];
438 spin_lock_jnode(node);
439 JF_SET(node, JNODE_CREATED);
440 jnode_set_block(node, &block);
441 result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0);
442 BUG_ON(result != 0);
443 jnode_make_dirty_locked(node);
444 spin_unlock_jnode(node);
448 * invalidate coordinate, research must be performed to continue
449 * because write will continue on twig level
451 uf_coord->valid = 0;
452 return count;
456 * plug_hole - replace hole extent with unallocated and holes
457 * @uf_coord:
458 * @key:
459 * @node:
460 * @h: structure containing coordinate, lock handle, key, etc
462 * Creates an unallocated extent of width 1 within a hole. In worst case two
463 * additional extents can be created.
465 static int plug_hole(uf_coord_t *uf_coord, const reiser4_key *key, int *how)
467 struct replace_handle rh;
468 reiser4_extent *ext;
469 reiser4_block_nr width, pos_in_unit;
470 coord_t *coord;
471 struct extent_coord_extension *ext_coord;
472 int return_inserted_position;
474 check_uf_coord(uf_coord, key);
476 rh.coord = coord_by_uf_coord(uf_coord);
477 rh.lh = uf_coord->lh;
478 rh.flags = 0;
480 coord = coord_by_uf_coord(uf_coord);
481 ext_coord = ext_coord_by_uf_coord(uf_coord);
482 ext = ext_by_ext_coord(uf_coord);
484 width = ext_coord->width;
485 pos_in_unit = ext_coord->pos_in_unit;
487 *how = 0;
488 if (width == 1) {
489 reiser4_set_extent(ext, UNALLOCATED_EXTENT_START, 1);
490 znode_make_dirty(coord->node);
491 /* update uf_coord */
492 ON_DEBUG(ext_coord->extent = *ext);
493 *how = 1;
494 return 0;
495 } else if (pos_in_unit == 0) {
496 /* we deal with first element of extent */
497 if (coord->unit_pos) {
498 /* there is an extent to the left */
499 if (state_of_extent(ext - 1) == UNALLOCATED_EXTENT) {
501 * left neighboring unit is an unallocated
502 * extent. Increase its width and decrease
503 * width of hole
505 extent_set_width(ext - 1,
506 extent_get_width(ext - 1) + 1);
507 extent_set_width(ext, width - 1);
508 znode_make_dirty(coord->node);
510 /* update coord extension */
511 coord->unit_pos--;
512 ext_coord->width = extent_get_width(ext - 1);
513 ext_coord->pos_in_unit = ext_coord->width - 1;
514 ext_coord->ext_offset -= sizeof(reiser4_extent);
515 ON_DEBUG(ext_coord->extent =
516 *extent_by_coord(coord));
517 *how = 2;
518 return 0;
521 /* extent for replace */
522 reiser4_set_extent(&rh.overwrite, UNALLOCATED_EXTENT_START, 1);
523 /* extent to be inserted */
524 reiser4_set_extent(&rh.new_extents[0], HOLE_EXTENT_START,
525 width - 1);
526 rh.nr_new_extents = 1;
528 /* have reiser4_replace_extent to return with @coord and
529 @uf_coord->lh set to unit which was replaced */
530 return_inserted_position = 0;
531 *how = 3;
532 } else if (pos_in_unit == width - 1) {
533 /* we deal with last element of extent */
534 if (coord->unit_pos < nr_units_extent(coord) - 1) {
535 /* there is an extent unit to the right */
536 if (state_of_extent(ext + 1) == UNALLOCATED_EXTENT) {
538 * right neighboring unit is an unallocated
539 * extent. Increase its width and decrease
540 * width of hole
542 extent_set_width(ext + 1,
543 extent_get_width(ext + 1) + 1);
544 extent_set_width(ext, width - 1);
545 znode_make_dirty(coord->node);
547 /* update coord extension */
548 coord->unit_pos++;
549 ext_coord->width = extent_get_width(ext + 1);
550 ext_coord->pos_in_unit = 0;
551 ext_coord->ext_offset += sizeof(reiser4_extent);
552 ON_DEBUG(ext_coord->extent =
553 *extent_by_coord(coord));
554 *how = 4;
555 return 0;
558 /* extent for replace */
559 reiser4_set_extent(&rh.overwrite, HOLE_EXTENT_START, width - 1);
560 /* extent to be inserted */
561 reiser4_set_extent(&rh.new_extents[0], UNALLOCATED_EXTENT_START,
563 rh.nr_new_extents = 1;
565 /* have reiser4_replace_extent to return with @coord and
566 @uf_coord->lh set to unit which was inserted */
567 return_inserted_position = 1;
568 *how = 5;
569 } else {
570 /* extent for replace */
571 reiser4_set_extent(&rh.overwrite, HOLE_EXTENT_START,
572 pos_in_unit);
573 /* extents to be inserted */
574 reiser4_set_extent(&rh.new_extents[0], UNALLOCATED_EXTENT_START,
576 reiser4_set_extent(&rh.new_extents[1], HOLE_EXTENT_START,
577 width - pos_in_unit - 1);
578 rh.nr_new_extents = 2;
580 /* have reiser4_replace_extent to return with @coord and
581 @uf_coord->lh set to first of units which were inserted */
582 return_inserted_position = 1;
583 *how = 6;
585 unit_key_by_coord(coord, &rh.paste_key);
586 set_key_offset(&rh.paste_key, get_key_offset(&rh.paste_key) +
587 extent_get_width(&rh.overwrite) * current_blocksize);
589 uf_coord->valid = 0;
590 return reiser4_replace_extent(&rh, return_inserted_position);
594 * overwrite_one_block -
595 * @uf_coord:
596 * @key:
597 * @node:
599 * If @node corresponds to hole extent - create unallocated extent for it and
600 * assign fake block number. If @node corresponds to allocated extent - assign
601 * block number of jnode
603 static int overwrite_one_block(uf_coord_t *uf_coord, const reiser4_key *key,
604 jnode *node, int *hole_plugged)
606 int result;
607 struct extent_coord_extension *ext_coord;
608 reiser4_extent *ext;
609 reiser4_block_nr block;
610 int how;
612 assert("vs-1312", uf_coord->coord.between == AT_UNIT);
614 result = 0;
615 ext_coord = ext_coord_by_uf_coord(uf_coord);
616 ext = ext_by_ext_coord(uf_coord);
617 assert("", state_of_extent(ext) != UNALLOCATED_EXTENT);
619 switch (state_of_extent(ext)) {
620 case ALLOCATED_EXTENT:
621 block = extent_get_start(ext) + ext_coord->pos_in_unit;
622 break;
624 case HOLE_EXTENT:
625 result = DQUOT_ALLOC_BLOCK_NODIRTY(mapping_jnode(node)->host, 1);
626 BUG_ON(result != 0);
627 result = plug_hole(uf_coord, key, &how);
628 if (result)
629 return result;
630 block = fake_blocknr_unformatted(1);
631 if (hole_plugged)
632 *hole_plugged = 1;
633 JF_SET(node, JNODE_CREATED);
634 break;
636 default:
637 return RETERR(-EIO);
640 jnode_set_block(node, &block);
641 return 0;
645 * move_coord - move coordinate forward
646 * @uf_coord:
648 * Move coordinate one data block pointer forward. Return 1 if coord is set to
649 * the last one already or is invalid.
651 static int move_coord(uf_coord_t *uf_coord)
653 struct extent_coord_extension *ext_coord;
655 if (uf_coord->valid == 0)
656 return 1;
657 ext_coord = &uf_coord->extension.extent;
658 ext_coord->pos_in_unit ++;
659 if (ext_coord->pos_in_unit < ext_coord->width)
660 /* coordinate moved within the unit */
661 return 0;
663 /* end of unit is reached. Try to move to next unit */
664 ext_coord->pos_in_unit = 0;
665 uf_coord->coord.unit_pos ++;
666 if (uf_coord->coord.unit_pos < ext_coord->nr_units) {
667 /* coordinate moved to next unit */
668 ext_coord->ext_offset += sizeof(reiser4_extent);
669 ext_coord->width =
670 extent_get_width(ext_by_offset
671 (uf_coord->coord.node,
672 ext_coord->ext_offset));
673 ON_DEBUG(ext_coord->extent =
674 *ext_by_offset(uf_coord->coord.node,
675 ext_coord->ext_offset));
676 return 0;
678 /* end of item is reached */
679 uf_coord->valid = 0;
680 return 1;
684 * overwrite_extent -
685 * @inode:
687 * Returns number of handled jnodes.
689 static int overwrite_extent(uf_coord_t *uf_coord, const reiser4_key *key,
690 jnode **jnodes, int count, int *plugged_hole)
692 int result;
693 reiser4_key k;
694 int i;
695 jnode *node;
697 k = *key;
698 for (i = 0; i < count; i ++) {
699 node = jnodes[i];
700 if (*jnode_get_block(node) == 0) {
701 result = overwrite_one_block(uf_coord, &k, node, plugged_hole);
702 if (result)
703 return result;
706 * make sure that we hold long term locked twig node containing
707 * all jnodes we are about to capture
709 check_jnodes(uf_coord->lh->node, &k, 1);
711 * assign fake block numbers to all jnodes, capture and mark
712 * them dirty
714 spin_lock_jnode(node);
715 result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0);
716 BUG_ON(result != 0);
717 jnode_make_dirty_locked(node);
718 spin_unlock_jnode(node);
720 if (uf_coord->valid == 0)
721 return i + 1;
723 check_uf_coord(uf_coord, &k);
725 if (move_coord(uf_coord)) {
727 * failed to move to the next node pointer. Either end
728 * of file or end of twig node is reached. In the later
729 * case we might go to the right neighbor.
731 uf_coord->valid = 0;
732 return i + 1;
734 set_key_offset(&k, get_key_offset(&k) + PAGE_CACHE_SIZE);
737 return count;
741 * reiser4_update_extent
742 * @file:
743 * @jnodes:
744 * @count:
745 * @off:
748 int reiser4_update_extent(struct inode *inode, jnode *node, loff_t pos,
749 int *plugged_hole)
751 int result;
752 znode *loaded;
753 uf_coord_t uf_coord;
754 coord_t *coord;
755 lock_handle lh;
756 reiser4_key key;
758 assert("", reiser4_lock_counters()->d_refs == 0);
760 key_by_inode_and_offset_common(inode, pos, &key);
762 init_uf_coord(&uf_coord, &lh);
763 coord = &uf_coord.coord;
764 result = find_file_item_nohint(coord, &lh, &key,
765 ZNODE_WRITE_LOCK, inode);
766 if (IS_CBKERR(result)) {
767 assert("", reiser4_lock_counters()->d_refs == 0);
768 return result;
771 result = zload(coord->node);
772 BUG_ON(result != 0);
773 loaded = coord->node;
775 if (coord->between == AFTER_UNIT) {
777 * append existing extent item with unallocated extent of width
778 * nr_jnodes
780 init_coord_extension_extent(&uf_coord,
781 get_key_offset(&key));
782 result = append_last_extent(&uf_coord, &key,
783 &node, 1);
784 } else if (coord->between == AT_UNIT) {
786 * overwrite
787 * not optimal yet. Will be optimized if new write will show
788 * performance win.
790 init_coord_extension_extent(&uf_coord,
791 get_key_offset(&key));
792 result = overwrite_extent(&uf_coord, &key,
793 &node, 1, plugged_hole);
794 } else {
796 * there are no items of this file in the tree yet. Create
797 * first item of the file inserting one unallocated extent of
798 * width nr_jnodes
800 result = insert_first_extent(&uf_coord, &key, &node, 1, inode);
802 assert("", result == 1 || result < 0);
803 zrelse(loaded);
804 done_lh(&lh);
805 assert("", reiser4_lock_counters()->d_refs == 0);
806 return (result == 1) ? 0 : result;
810 * update_extents
811 * @file:
812 * @jnodes:
813 * @count:
814 * @off:
817 static int update_extents(struct file *file, struct inode *inode,
818 jnode **jnodes, int count, loff_t pos)
820 struct hint hint;
821 reiser4_key key;
822 int result;
823 znode *loaded;
825 result = load_file_hint(file, &hint);
826 BUG_ON(result != 0);
828 if (count != 0)
830 * count == 0 is special case: expanding truncate
832 pos = (loff_t)index_jnode(jnodes[0]) << PAGE_CACHE_SHIFT;
833 key_by_inode_and_offset_common(inode, pos, &key);
835 assert("", reiser4_lock_counters()->d_refs == 0);
837 do {
838 result = find_file_item(&hint, &key, ZNODE_WRITE_LOCK, inode);
839 if (IS_CBKERR(result)) {
840 assert("", reiser4_lock_counters()->d_refs == 0);
841 return result;
844 result = zload(hint.ext_coord.coord.node);
845 BUG_ON(result != 0);
846 loaded = hint.ext_coord.coord.node;
848 if (hint.ext_coord.coord.between == AFTER_UNIT) {
850 * append existing extent item with unallocated extent
851 * of width nr_jnodes
853 if (hint.ext_coord.valid == 0)
854 /* NOTE: get statistics on this */
855 init_coord_extension_extent(&hint.ext_coord,
856 get_key_offset(&key));
857 result = append_last_extent(&hint.ext_coord, &key,
858 jnodes, count);
859 } else if (hint.ext_coord.coord.between == AT_UNIT) {
861 * overwrite
862 * not optimal yet. Will be optimized if new write will
863 * show performance win.
865 if (hint.ext_coord.valid == 0)
866 /* NOTE: get statistics on this */
867 init_coord_extension_extent(&hint.ext_coord,
868 get_key_offset(&key));
869 result = overwrite_extent(&hint.ext_coord, &key,
870 jnodes, count, NULL);
871 } else {
873 * there are no items of this file in the tree
874 * yet. Create first item of the file inserting one
875 * unallocated extent of * width nr_jnodes
877 result = insert_first_extent(&hint.ext_coord, &key,
878 jnodes, count, inode);
880 zrelse(loaded);
881 if (result < 0) {
882 done_lh(hint.ext_coord.lh);
883 break;
886 jnodes += result;
887 count -= result;
888 set_key_offset(&key, get_key_offset(&key) + result * PAGE_CACHE_SIZE);
890 /* seal and unlock znode */
891 if (hint.ext_coord.valid)
892 reiser4_set_hint(&hint, &key, ZNODE_WRITE_LOCK);
893 else
894 reiser4_unset_hint(&hint);
896 } while (count > 0);
898 save_file_hint(file, &hint);
899 assert("", reiser4_lock_counters()->d_refs == 0);
900 return result;
904 * write_extent_reserve_space - reserve space for extent write operation
905 * @inode:
907 * Estimates and reserves space which may be required for writing
908 * WRITE_GRANULARITY pages of file.
910 static int write_extent_reserve_space(struct inode *inode)
912 __u64 count;
913 reiser4_tree *tree;
916 * to write WRITE_GRANULARITY pages to a file by extents we have to
917 * reserve disk space for:
919 * 1. find_file_item may have to insert empty node to the tree (empty
920 * leaf node between two extent items). This requires 1 block and
921 * number of blocks which are necessary to perform insertion of an
922 * internal item into twig level.
924 * 2. for each of written pages there might be needed 1 block and
925 * number of blocks which might be necessary to perform insertion of or
926 * paste to an extent item.
928 * 3. stat data update
930 tree = reiser4_tree_by_inode(inode);
931 count = estimate_one_insert_item(tree) +
932 WRITE_GRANULARITY * (1 + estimate_one_insert_into_item(tree)) +
933 estimate_one_insert_item(tree);
934 grab_space_enable();
935 return reiser4_grab_space(count, 0 /* flags */);
939 * filemap_copy_from_user no longer exists in generic code, because it
940 * is deadlocky (copying from user while holding the page lock is bad).
941 * As a temporary fix for reiser4, just define it here.
943 static inline size_t
944 filemap_copy_from_user(struct page *page, unsigned long offset,
945 const char __user *buf, unsigned bytes)
947 char *kaddr;
948 int left;
950 kaddr = kmap_atomic(page, KM_USER0);
951 left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
952 kunmap_atomic(kaddr, KM_USER0);
954 if (left != 0) {
955 /* Do it the slow way */
956 kaddr = kmap(page);
957 left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
958 kunmap(page);
960 return bytes - left;
964 * reiser4_write_extent - write method of extent item plugin
965 * @file: file to write to
966 * @buf: address of user-space buffer
967 * @count: number of bytes to write
968 * @pos: position in file to write to
971 ssize_t reiser4_write_extent(struct file *file, struct inode * inode,
972 const char __user *buf, size_t count, loff_t *pos)
974 int have_to_update_extent;
975 int nr_pages, nr_dirty;
976 struct page *page;
977 jnode *jnodes[WRITE_GRANULARITY + 1];
978 unsigned long index;
979 unsigned long end;
980 int i;
981 int to_page, page_off;
982 size_t left, written;
983 int result = 0;
985 if (write_extent_reserve_space(inode))
986 return RETERR(-ENOSPC);
988 if (count == 0) {
989 /* truncate case */
990 update_extents(file, inode, jnodes, 0, *pos);
991 return 0;
994 BUG_ON(get_current_context()->trans->atom != NULL);
996 left = count;
997 index = *pos >> PAGE_CACHE_SHIFT;
998 /* calculate number of pages which are to be written */
999 end = ((*pos + count - 1) >> PAGE_CACHE_SHIFT);
1000 nr_pages = end - index + 1;
1001 nr_dirty = 0;
1002 assert("", nr_pages <= WRITE_GRANULARITY + 1);
1004 /* get pages and jnodes */
1005 for (i = 0; i < nr_pages; i ++) {
1006 page = find_or_create_page(inode->i_mapping, index + i,
1007 reiser4_ctx_gfp_mask_get());
1008 if (page == NULL) {
1009 nr_pages = i;
1010 result = RETERR(-ENOMEM);
1011 goto out;
1014 jnodes[i] = jnode_of_page(page);
1015 if (IS_ERR(jnodes[i])) {
1016 unlock_page(page);
1017 page_cache_release(page);
1018 nr_pages = i;
1019 result = RETERR(-ENOMEM);
1020 goto out;
1022 /* prevent jnode and page from disconnecting */
1023 JF_SET(jnodes[i], JNODE_WRITE_PREPARED);
1024 unlock_page(page);
1027 BUG_ON(get_current_context()->trans->atom != NULL);
1029 have_to_update_extent = 0;
1031 page_off = (*pos & (PAGE_CACHE_SIZE - 1));
1032 for (i = 0; i < nr_pages; i ++) {
1033 to_page = PAGE_CACHE_SIZE - page_off;
1034 if (to_page > left)
1035 to_page = left;
1036 page = jnode_page(jnodes[i]);
1037 if (page_offset(page) < inode->i_size &&
1038 !PageUptodate(page) && to_page != PAGE_CACHE_SIZE) {
1040 * the above is not optimal for partial write to last
1041 * page of file when file size is not at boundary of
1042 * page
1044 lock_page(page);
1045 if (!PageUptodate(page)) {
1046 result = readpage_unix_file(NULL, page);
1047 BUG_ON(result != 0);
1048 /* wait for read completion */
1049 lock_page(page);
1050 BUG_ON(!PageUptodate(page));
1051 } else
1052 result = 0;
1053 unlock_page(page);
1056 BUG_ON(get_current_context()->trans->atom != NULL);
1057 fault_in_pages_readable(buf, to_page);
1058 BUG_ON(get_current_context()->trans->atom != NULL);
1060 lock_page(page);
1061 if (!PageUptodate(page) && to_page != PAGE_CACHE_SIZE)
1062 zero_user_segments(page, 0, page_off,
1063 page_off + to_page,
1064 PAGE_CACHE_SIZE);
1066 written = filemap_copy_from_user(page, page_off, buf, to_page);
1067 if (unlikely(written != to_page)) {
1068 unlock_page(page);
1069 result = RETERR(-EFAULT);
1070 break;
1073 flush_dcache_page(page);
1074 reiser4_set_page_dirty_internal(page);
1075 unlock_page(page);
1076 nr_dirty++;
1078 mark_page_accessed(page);
1079 SetPageUptodate(page);
1081 if (jnodes[i]->blocknr == 0)
1082 have_to_update_extent ++;
1084 page_off = 0;
1085 buf += to_page;
1086 left -= to_page;
1087 BUG_ON(get_current_context()->trans->atom != NULL);
1090 if (have_to_update_extent) {
1091 update_extents(file, inode, jnodes, nr_dirty, *pos);
1092 } else {
1093 for (i = 0; i < nr_dirty; i ++) {
1094 int ret;
1095 spin_lock_jnode(jnodes[i]);
1096 ret = reiser4_try_capture(jnodes[i],
1097 ZNODE_WRITE_LOCK, 0);
1098 BUG_ON(ret != 0);
1099 jnode_make_dirty_locked(jnodes[i]);
1100 spin_unlock_jnode(jnodes[i]);
1103 out:
1104 for (i = 0; i < nr_pages; i ++) {
1105 page_cache_release(jnode_page(jnodes[i]));
1106 JF_CLR(jnodes[i], JNODE_WRITE_PREPARED);
1107 jput(jnodes[i]);
1110 /* the only errors handled so far is ENOMEM and
1111 EFAULT on copy_from_user */
1113 return (count - left) ? (count - left) : result;
1116 int reiser4_do_readpage_extent(reiser4_extent * ext, reiser4_block_nr pos,
1117 struct page *page)
1119 jnode *j;
1120 struct address_space *mapping;
1121 unsigned long index;
1122 oid_t oid;
1123 reiser4_block_nr block;
1125 mapping = page->mapping;
1126 oid = get_inode_oid(mapping->host);
1127 index = page->index;
1129 switch (state_of_extent(ext)) {
1130 case HOLE_EXTENT:
1132 * it is possible to have hole page with jnode, if page was
1133 * eflushed previously.
1135 j = jfind(mapping, index);
1136 if (j == NULL) {
1137 zero_user(page, 0, PAGE_CACHE_SIZE);
1138 SetPageUptodate(page);
1139 unlock_page(page);
1140 return 0;
1142 spin_lock_jnode(j);
1143 if (!jnode_page(j)) {
1144 jnode_attach_page(j, page);
1145 } else {
1146 BUG_ON(jnode_page(j) != page);
1147 assert("vs-1504", jnode_page(j) == page);
1149 block = *jnode_get_io_block(j);
1150 spin_unlock_jnode(j);
1151 if (block == 0) {
1152 zero_user(page, 0, PAGE_CACHE_SIZE);
1153 SetPageUptodate(page);
1154 unlock_page(page);
1155 jput(j);
1156 return 0;
1158 break;
1160 case ALLOCATED_EXTENT:
1161 j = jnode_of_page(page);
1162 if (IS_ERR(j))
1163 return PTR_ERR(j);
1164 if (*jnode_get_block(j) == 0) {
1165 reiser4_block_nr blocknr;
1167 blocknr = extent_get_start(ext) + pos;
1168 jnode_set_block(j, &blocknr);
1169 } else
1170 assert("vs-1403",
1171 j->blocknr == extent_get_start(ext) + pos);
1172 break;
1174 case UNALLOCATED_EXTENT:
1175 j = jfind(mapping, index);
1176 assert("nikita-2688", j);
1177 assert("vs-1426", jnode_page(j) == NULL);
1179 spin_lock_jnode(j);
1180 jnode_attach_page(j, page);
1181 spin_unlock_jnode(j);
1182 break;
1184 default:
1185 warning("vs-957", "wrong extent\n");
1186 return RETERR(-EIO);
1189 BUG_ON(j == 0);
1190 reiser4_page_io(page, j, READ, reiser4_ctx_gfp_mask_get());
1191 jput(j);
1192 return 0;
1195 /* Implements plugin->u.item.s.file.read operation for extent items. */
1196 int reiser4_read_extent(struct file *file, flow_t *flow, hint_t *hint)
1198 int result;
1199 struct page *page;
1200 unsigned long cur_page, next_page;
1201 unsigned long page_off, count;
1202 struct address_space *mapping;
1203 loff_t file_off;
1204 uf_coord_t *uf_coord;
1205 coord_t *coord;
1206 struct extent_coord_extension *ext_coord;
1207 unsigned long nr_pages;
1208 char *kaddr;
1210 assert("vs-1353", current_blocksize == PAGE_CACHE_SIZE);
1211 assert("vs-572", flow->user == 1);
1212 assert("vs-1351", flow->length > 0);
1214 uf_coord = &hint->ext_coord;
1216 check_uf_coord(uf_coord, NULL);
1217 assert("vs-33", uf_coord->lh == &hint->lh);
1219 coord = &uf_coord->coord;
1220 assert("vs-1119", znode_is_rlocked(coord->node));
1221 assert("vs-1120", znode_is_loaded(coord->node));
1222 assert("vs-1256", coord_matches_key_extent(coord, &flow->key));
1224 mapping = file->f_dentry->d_inode->i_mapping;
1225 ext_coord = &uf_coord->extension.extent;
1227 /* offset in a file to start read from */
1228 file_off = get_key_offset(&flow->key);
1229 /* offset within the page to start read from */
1230 page_off = (unsigned long)(file_off & (PAGE_CACHE_SIZE - 1));
1231 /* bytes which can be read from the page which contains file_off */
1232 count = PAGE_CACHE_SIZE - page_off;
1234 /* index of page containing offset read is to start from */
1235 cur_page = (unsigned long)(file_off >> PAGE_CACHE_SHIFT);
1236 next_page = cur_page;
1237 /* number of pages flow spans over */
1238 nr_pages =
1239 ((file_off + flow->length + PAGE_CACHE_SIZE -
1240 1) >> PAGE_CACHE_SHIFT) - cur_page;
1242 /* we start having twig node read locked. However, we do not want to
1243 keep that lock all the time readahead works. So, set a sel and
1244 release twig node. */
1245 reiser4_set_hint(hint, &flow->key, ZNODE_READ_LOCK);
1246 /* &hint->lh is done-ed */
1248 do {
1249 reiser4_txn_restart_current();
1250 page = read_mapping_page(mapping, cur_page, file);
1251 if (IS_ERR(page))
1252 return PTR_ERR(page);
1253 lock_page(page);
1254 if (!PageUptodate(page)) {
1255 unlock_page(page);
1256 page_cache_release(page);
1257 warning("jmacd-97178", "extent_read: page is not up to date");
1258 return RETERR(-EIO);
1260 mark_page_accessed(page);
1261 unlock_page(page);
1263 /* If users can be writing to this page using arbitrary virtual
1264 addresses, take care about potential aliasing before reading
1265 the page on the kernel side.
1267 if (mapping_writably_mapped(mapping))
1268 flush_dcache_page(page);
1270 assert("nikita-3034", reiser4_schedulable());
1272 /* number of bytes which are to be read from the page */
1273 if (count > flow->length)
1274 count = flow->length;
1276 result = fault_in_pages_writeable(flow->data, count);
1277 if (result) {
1278 page_cache_release(page);
1279 return RETERR(-EFAULT);
1282 kaddr = kmap_atomic(page, KM_USER0);
1283 result = __copy_to_user_inatomic(flow->data,
1284 kaddr + page_off, count);
1285 kunmap_atomic(kaddr, KM_USER0);
1286 if (result != 0) {
1287 kaddr = kmap(page);
1288 result = __copy_to_user(flow->data, kaddr + page_off, count);
1289 kunmap(page);
1290 if (unlikely(result))
1291 return RETERR(-EFAULT);
1294 page_cache_release(page);
1296 /* increase key (flow->key), update user area pointer (flow->data) */
1297 move_flow_forward(flow, count);
1299 page_off = 0;
1300 cur_page ++;
1301 count = PAGE_CACHE_SIZE;
1302 nr_pages--;
1303 } while (flow->length);
1305 return 0;
1309 plugin->s.file.readpage
1310 reiser4_read->unix_file_read->page_cache_readahead->reiser4_readpage->unix_file_readpage->extent_readpage
1312 filemap_fault->reiser4_readpage->readpage_unix_file->->readpage_extent
1314 At the beginning: coord->node is read locked, zloaded, page is
1315 locked, coord is set to existing unit inside of extent item (it is not necessary that coord matches to page->index)
1317 int reiser4_readpage_extent(void *vp, struct page *page)
1319 uf_coord_t *uf_coord = vp;
1320 ON_DEBUG(coord_t * coord = &uf_coord->coord);
1321 ON_DEBUG(reiser4_key key);
1323 assert("vs-1040", PageLocked(page));
1324 assert("vs-1050", !PageUptodate(page));
1325 assert("vs-1039", page->mapping && page->mapping->host);
1327 assert("vs-1044", znode_is_loaded(coord->node));
1328 assert("vs-758", item_is_extent(coord));
1329 assert("vs-1046", coord_is_existing_unit(coord));
1330 assert("vs-1045", znode_is_rlocked(coord->node));
1331 assert("vs-1047",
1332 page->mapping->host->i_ino ==
1333 get_key_objectid(item_key_by_coord(coord, &key)));
1334 check_uf_coord(uf_coord, NULL);
1336 return reiser4_do_readpage_extent(
1337 ext_by_ext_coord(uf_coord),
1338 uf_coord->extension.extent.pos_in_unit, page);
1342 * get_block_address_extent
1343 * @coord:
1344 * @block:
1345 * @result:
1349 int get_block_address_extent(const coord_t *coord, sector_t block,
1350 sector_t *result)
1352 reiser4_extent *ext;
1354 if (!coord_is_existing_unit(coord))
1355 return RETERR(-EINVAL);
1357 ext = extent_by_coord(coord);
1359 if (state_of_extent(ext) != ALLOCATED_EXTENT)
1360 /* FIXME: bad things may happen if it is unallocated extent */
1361 *result = 0;
1362 else {
1363 reiser4_key key;
1365 unit_key_by_coord(coord, &key);
1366 assert("vs-1645",
1367 block >= get_key_offset(&key) >> current_blocksize_bits);
1368 assert("vs-1646",
1369 block <
1370 (get_key_offset(&key) >> current_blocksize_bits) +
1371 extent_get_width(ext));
1372 *result =
1373 extent_get_start(ext) + (block -
1374 (get_key_offset(&key) >>
1375 current_blocksize_bits));
1377 return 0;
1381 plugin->u.item.s.file.append_key
1382 key of first byte which is the next to last byte by addressed by this extent
1384 reiser4_key *append_key_extent(const coord_t * coord, reiser4_key * key)
1386 item_key_by_coord(coord, key);
1387 set_key_offset(key,
1388 get_key_offset(key) + reiser4_extent_size(coord,
1389 nr_units_extent
1390 (coord)));
1392 assert("vs-610", get_key_offset(key)
1393 && (get_key_offset(key) & (current_blocksize - 1)) == 0);
1394 return key;
1397 /* plugin->u.item.s.file.init_coord_extension */
1398 void init_coord_extension_extent(uf_coord_t * uf_coord, loff_t lookuped)
1400 coord_t *coord;
1401 struct extent_coord_extension *ext_coord;
1402 reiser4_key key;
1403 loff_t offset;
1405 assert("vs-1295", uf_coord->valid == 0);
1407 coord = &uf_coord->coord;
1408 assert("vs-1288", coord_is_iplug_set(coord));
1409 assert("vs-1327", znode_is_loaded(coord->node));
1411 if (coord->between != AFTER_UNIT && coord->between != AT_UNIT)
1412 return;
1414 ext_coord = &uf_coord->extension.extent;
1415 ext_coord->nr_units = nr_units_extent(coord);
1416 ext_coord->ext_offset =
1417 (char *)extent_by_coord(coord) - zdata(coord->node);
1418 ext_coord->width = extent_get_width(extent_by_coord(coord));
1419 ON_DEBUG(ext_coord->extent = *extent_by_coord(coord));
1420 uf_coord->valid = 1;
1422 /* pos_in_unit is the only uninitialized field in extended coord */
1423 if (coord->between == AFTER_UNIT) {
1424 assert("vs-1330",
1425 coord->unit_pos == nr_units_extent(coord) - 1);
1427 ext_coord->pos_in_unit = ext_coord->width - 1;
1428 } else {
1429 /* AT_UNIT */
1430 unit_key_by_coord(coord, &key);
1431 offset = get_key_offset(&key);
1433 assert("vs-1328", offset <= lookuped);
1434 assert("vs-1329",
1435 lookuped <
1436 offset + ext_coord->width * current_blocksize);
1437 ext_coord->pos_in_unit =
1438 ((lookuped - offset) >> current_blocksize_bits);
/*
 * Local variables:
 * c-indentation-style: "K&R"
 * mode-name: "LC"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 79
 * scroll-step: 1
 * End:
 */