mm-only debug patch...
[mmotm.git] / fs / reiser4 / plugin / item / extent_file_ops.c
blobbb7ce5608dc333fd81eb99756b4b7e79a63f4be4
1 /* COPYRIGHT 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */
3 #include "item.h"
4 #include "../../inode.h"
5 #include "../../page_cache.h"
6 #include "../object.h"
8 #include <linux/quotaops.h>
9 #include <linux/swap.h>
11 static inline reiser4_extent *ext_by_offset(const znode *node, int offset)
13 reiser4_extent *ext;
15 ext = (reiser4_extent *) (zdata(node) + offset);
16 return ext;
/**
 * check_uf_coord - verify coord extension (debug builds only)
 * @uf_coord: coord plus cached extent-coord extension to validate
 * @key: expected key of the addressed position, or NULL to skip the key check
 *
 * Makes sure that all fields of @uf_coord are set properly: the cached
 * extension (unit count, unit width, position in unit, and the saved copy
 * of the extent) must agree with what is actually stored in the node.
 * If @key is specified - check whether @uf_coord is set correspondingly,
 * i.e. that the unit key plus pos_in_unit pages equals @key.
 * Compiles to an empty function unless REISER4_DEBUG.
 */
27 static void check_uf_coord(const uf_coord_t *uf_coord, const reiser4_key *key)
29 #if REISER4_DEBUG
30 const coord_t *coord;
31 const struct extent_coord_extension *ext_coord;
32 reiser4_extent *ext;
34 coord = &uf_coord->coord;
35 ext_coord = &uf_coord->extension.extent;
36 ext = ext_by_offset(coord->node, uf_coord->extension.extent.ext_offset);
/* all invariants are checked while the node data is pinned (WITH_DATA) */
38 assert("",
39 WITH_DATA(coord->node,
40 (uf_coord->valid == 1 &&
41 coord_is_iplug_set(coord) &&
42 item_is_extent(coord) &&
43 ext_coord->nr_units == nr_units_extent(coord) &&
44 ext == extent_by_coord(coord) &&
45 ext_coord->width == extent_get_width(ext) &&
46 coord->unit_pos < ext_coord->nr_units &&
47 ext_coord->pos_in_unit < ext_coord->width &&
48 memcmp(ext, &ext_coord->extent,
49 sizeof(reiser4_extent)) == 0)));
50 if (key) {
51 reiser4_key coord_key;
53 unit_key_by_coord(&uf_coord->coord, &coord_key);
/* advance the unit key by pos_in_unit pages to get the addressed key */
54 set_key_offset(&coord_key,
55 get_key_offset(&coord_key) +
56 (uf_coord->extension.extent.
57 pos_in_unit << PAGE_CACHE_SHIFT));
58 assert("", keyeq(key, &coord_key));
60 #endif
63 static inline reiser4_extent *ext_by_ext_coord(const uf_coord_t *uf_coord)
65 check_uf_coord(uf_coord, NULL);
67 return ext_by_offset(uf_coord->coord.node,
68 uf_coord->extension.extent.ext_offset);
71 #if REISER4_DEBUG
/*
 * offset_is_in_unit - debug helper
 *
 * Return 1 if file offset @off falls inside the extent unit pointed to by
 * @coord, i.e. unit_key <= off < unit_key + width * blocksize.
 */
81 static int offset_is_in_unit(const coord_t *coord, loff_t off)
83 reiser4_key unit_key;
84 __u64 unit_off;
85 reiser4_extent *ext;
87 ext = extent_by_coord(coord);
89 unit_key_extent(coord, &unit_key);
90 unit_off = get_key_offset(&unit_key);
91 if (off < unit_off)
92 return 0;
93 if (off >= (unit_off + (current_blocksize * extent_get_width(ext))))
94 return 0;
95 return 1;
/*
 * coord_matches_key_extent - debug helper
 *
 * Assert that @key lies within the extent item at @coord (at or after the
 * item key, strictly before the append key), then check it falls inside
 * the current unit.
 */
98 static int
99 coord_matches_key_extent(const coord_t * coord, const reiser4_key * key)
101 reiser4_key item_key;
103 assert("vs-771", coord_is_existing_unit(coord));
104 assert("vs-1258", keylt(key, append_key_extent(coord, &item_key)));
105 assert("vs-1259", keyge(key, item_key_by_coord(coord, &item_key)));
107 return offset_is_in_unit(coord, get_key_offset(key));
110 #endif
113 * can_append -
114 * @key:
115 * @coord:
117 * Returns 1 if @key is equal to an append key of item @coord is set to
119 static int can_append(const reiser4_key *key, const coord_t *coord)
121 reiser4_key append_key;
123 return keyeq(key, append_key_extent(coord, &append_key));
/**
 * append_hole - pad the last extent item of a file with a hole
 * @coord: coord of the last unit of the file's last extent item
 * @lh: lock handle of the twig node holding the item
 * @key: key of the first byte the caller is about to write
 *
 * Fills the gap between the current append key of the item and @key with
 * a hole: either widens an existing trailing hole unit in place, or
 * inserts a new hole unit after the item.  Returns 0 on success or an
 * error code from insert_into_item().
 */
133 static int append_hole(coord_t *coord, lock_handle *lh,
134 const reiser4_key *key)
136 reiser4_key append_key;
137 reiser4_block_nr hole_width;
138 reiser4_extent *ext, new_ext;
139 reiser4_item_data idata;
141 /* last item of file may have to be appended with hole */
142 assert("vs-708", znode_get_level(coord->node) == TWIG_LEVEL);
143 assert("vs-714", item_id_by_coord(coord) == EXTENT_POINTER_ID);
145 /* key of first byte which is not addressed by this extent */
146 append_key_extent(coord, &append_key);
148 assert("", keyle(&append_key, key));
151 * extent item has to be appended with hole. Calculate length of that
152 * hole
/* round the byte gap up to whole blocks */
154 hole_width = ((get_key_offset(key) - get_key_offset(&append_key) +
155 current_blocksize - 1) >> current_blocksize_bits);
156 assert("vs-954", hole_width > 0);
158 /* set coord after last unit */
159 coord_init_after_item_end(coord);
161 /* get last extent in the item */
162 ext = extent_by_coord(coord);
163 if (state_of_extent(ext) == HOLE_EXTENT) {
165 * last extent of a file is hole extent. Widen that extent by
166 * @hole_width blocks. Note that we do not worry about
167 * overflowing - extent width is 64 bits
169 reiser4_set_extent(ext, HOLE_EXTENT_START,
170 extent_get_width(ext) + hole_width);
171 znode_make_dirty(coord->node);
172 return 0;
175 /* append last item of the file with hole extent unit */
176 assert("vs-713", (state_of_extent(ext) == ALLOCATED_EXTENT ||
177 state_of_extent(ext) == UNALLOCATED_EXTENT));
179 reiser4_set_extent(&new_ext, HOLE_EXTENT_START, hole_width);
180 init_new_extent(&idata, &new_ext, 1);
181 return insert_into_item(coord, lh, &append_key, &idata, 0);
/**
 * check_jnodes - debug check that the locked twig covers jnodes to capture
 * @twig: longterm write-locked twig node
 * @key: key of the first of @count consecutive pages being captured
 * @count: number of pages/jnodes
 *
 * Verifies that the keys of all @count jnodes fall between the smallest
 * key in @twig and the append key of its last item - i.e. the write lock
 * on @twig really covers everything about to be captured.  Compiles to
 * nothing unless REISER4_DEBUG.
 */
190 static void check_jnodes(znode *twig, const reiser4_key *key, int count)
192 #if REISER4_DEBUG
193 coord_t c;
194 reiser4_key node_key, jnode_key;
196 jnode_key = *key;
198 assert("", twig != NULL);
199 assert("", znode_get_level(twig) == TWIG_LEVEL);
200 assert("", znode_is_write_locked(twig));
/* NOTE(review): zload() result is ignored here; debug-only path - confirm
   failure is impossible while the node is write-locked */
202 zload(twig);
203 /* get the smallest key in twig node */
204 coord_init_first_unit(&c, twig);
205 unit_key_by_coord(&c, &node_key);
206 assert("", keyle(&node_key, &jnode_key));
208 coord_init_last_unit(&c, twig);
209 unit_key_by_coord(&c, &node_key);
210 if (item_plugin_by_coord(&c)->s.file.append_key)
211 item_plugin_by_coord(&c)->s.file.append_key(&c, &node_key);
/* key of the last byte of the last page being captured */
212 set_key_offset(&jnode_key,
213 get_key_offset(&jnode_key) + (loff_t)count * PAGE_CACHE_SIZE - 1);
214 assert("", keylt(&jnode_key, &node_key));
215 zrelse(twig);
216 #endif
/**
 * append_last_extent - append last file item
 * @uf_coord: coord to start insertion from
 * @key: key of the first byte to be written
 * @jnodes: array of jnodes for the pages being written
 * @count: number of jnodes in the array
 *
 * There is already at least one extent item of the file in the tree.
 * Append the last of them with an unallocated extent unit of width
 * @count (widening an existing unallocated unit when possible), or - if
 * @key does not continue the item - pad with a hole instead.  Assigns
 * fake block numbers to the jnodes corresponding to the inserted extent
 * and captures them.  Returns the number of handled jnodes or an error.
 */
229 static int append_last_extent(uf_coord_t *uf_coord, const reiser4_key *key,
230 jnode **jnodes, int count)
232 int result;
233 reiser4_extent new_ext;
234 reiser4_item_data idata;
235 coord_t *coord;
236 struct extent_coord_extension *ext_coord;
237 reiser4_extent *ext;
238 reiser4_block_nr block;
239 jnode *node;
240 int i;
242 coord = &uf_coord->coord;
243 ext_coord = &uf_coord->extension.extent;
244 ext = ext_by_ext_coord(uf_coord);
246 /* check correctness of position in the item */
247 assert("vs-228", coord->unit_pos == coord_last_unit_pos(coord));
248 assert("vs-1311", coord->between == AFTER_UNIT);
249 assert("vs-1302", ext_coord->pos_in_unit == ext_coord->width - 1);
251 if (!can_append(key, coord)) {
252 /* hole extent has to be inserted */
253 result = append_hole(coord, uf_coord->lh, key);
254 uf_coord->valid = 0;
255 return result;
258 if (count == 0)
259 return 0;
261 assert("", get_key_offset(key) == (loff_t)index_jnode(jnodes[0]) * PAGE_CACHE_SIZE);
/* charge quota for @count new blocks up front */
263 result = vfs_dq_alloc_block_nodirty(mapping_jnode(jnodes[0])->host,
264 count);
265 BUG_ON(result != 0);
267 switch (state_of_extent(ext)) {
268 case UNALLOCATED_EXTENT:
270 * last extent unit of the file is unallocated one. Increase
271 * its width by @count
273 reiser4_set_extent(ext, UNALLOCATED_EXTENT_START,
274 extent_get_width(ext) + count);
275 znode_make_dirty(coord->node);
277 /* update coord extension */
278 ext_coord->width += count;
279 ON_DEBUG(extent_set_width
280 (&uf_coord->extension.extent.extent,
281 ext_coord->width));
282 break;
284 case HOLE_EXTENT:
285 case ALLOCATED_EXTENT:
287 * last extent unit of the file is either hole or allocated
288 * one. Append one unallocated extent of width @count
290 reiser4_set_extent(&new_ext, UNALLOCATED_EXTENT_START, count);
291 init_new_extent(&idata, &new_ext, 1);
292 result = insert_into_item(coord, uf_coord->lh, key, &idata, 0);
293 uf_coord->valid = 0;
294 if (result)
295 return result;
296 break;
298 default:
299 return RETERR(-EIO);
303 * make sure that we hold long term locked twig node containing all
304 * jnodes we are about to capture
306 check_jnodes(uf_coord->lh->node, key, count);
309 * assign fake block numbers to all jnodes. FIXME: make sure whether
310 * twig node containing inserted extent item is locked
312 block = fake_blocknr_unformatted(count);
313 for (i = 0; i < count; i ++, block ++) {
314 node = jnodes[i];
315 spin_lock_jnode(node);
316 JF_SET(node, JNODE_CREATED);
317 jnode_set_block(node, &block);
318 result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0);
319 BUG_ON(result != 0);
320 jnode_make_dirty_locked(node);
321 spin_unlock_jnode(node);
323 return count;
/**
 * insert_first_hole - insert hole extent into tree
 * @coord: coord where the new item is to be inserted
 * @lh: lock handle
 * @key: key of the first byte the caller will write (offset != 0)
 *
 * The file has no items yet and the write starts past offset 0: insert a
 * hole extent item covering bytes [0, @key offset), rounded up to whole
 * blocks.  Returns the result of insert_extent_by_coord().
 */
334 static int insert_first_hole(coord_t *coord, lock_handle *lh,
335 const reiser4_key *key)
337 reiser4_extent new_ext;
338 reiser4_item_data idata;
339 reiser4_key item_key;
340 reiser4_block_nr hole_width;
342 /* @coord must be set for inserting of new item */
343 assert("vs-711", coord_is_between_items(coord));
/* the item starts at file offset 0 */
345 item_key = *key;
346 set_key_offset(&item_key, 0ull);
348 hole_width = ((get_key_offset(key) + current_blocksize - 1) >>
349 current_blocksize_bits);
350 assert("vs-710", hole_width > 0);
352 /* compose body of hole extent and insert item into tree */
353 reiser4_set_extent(&new_ext, HOLE_EXTENT_START, hole_width);
354 init_new_extent(&idata, &new_ext, 1);
355 return insert_extent_by_coord(coord, &idata, &item_key, lh);
/**
 * insert_first_extent - insert first file item
 * @uf_coord: coord to start insertion from
 * @key: key of the first byte to be written
 * @jnodes: array of jnodes
 * @count: number of jnodes in the array
 * @inode: inode of file
 *
 * There are no items of file @inode in the tree yet. Insert unallocated
 * extent of width @count into tree, or a hole extent if writing does not
 * start at the beginning of the file. Assign fake block numbers to jnodes
 * corresponding to the inserted unallocated extent. Returns number of
 * handled jnodes or error code.
 */
372 static int insert_first_extent(uf_coord_t *uf_coord, const reiser4_key *key,
373 jnode **jnodes, int count,
374 struct inode *inode)
376 int result;
377 int i;
378 reiser4_extent new_ext;
379 reiser4_item_data idata;
380 reiser4_block_nr block;
381 struct unix_file_info *uf_info;
382 jnode *node;
384 /* first extent insertion starts at leaf level */
385 assert("vs-719", znode_get_level(uf_coord->coord.node) == LEAF_LEVEL);
386 assert("vs-711", coord_is_between_items(&uf_coord->coord));
388 if (get_key_offset(key) != 0) {
389 result = insert_first_hole(&uf_coord->coord, uf_coord->lh, key);
390 uf_coord->valid = 0;
391 uf_info = unix_file_inode_data(inode);
394 * first item insertion is only possible when writing to empty
395 * file or performing tail conversion
397 assert("", (uf_info->container == UF_CONTAINER_EMPTY ||
398 (reiser4_inode_get_flag(inode,
399 REISER4_PART_MIXED) &&
400 reiser4_inode_get_flag(inode,
401 REISER4_PART_IN_CONV))));
402 /* if file was empty - update its state */
403 if (result == 0 && uf_info->container == UF_CONTAINER_EMPTY)
404 uf_info->container = UF_CONTAINER_EXTENTS;
405 return result;
408 if (count == 0)
409 return 0;
/* charge quota for @count new blocks */
411 result = vfs_dq_alloc_block_nodirty(mapping_jnode(jnodes[0])->host,
412 count);
413 BUG_ON(result != 0);
416 * prepare for tree modification: compose body of item and item data
417 * structure needed for insertion
419 reiser4_set_extent(&new_ext, UNALLOCATED_EXTENT_START, count);
420 init_new_extent(&idata, &new_ext, 1);
422 /* insert extent item into the tree */
423 result = insert_extent_by_coord(&uf_coord->coord, &idata, key,
424 uf_coord->lh);
425 if (result)
426 return result;
429 * make sure that we hold long term locked twig node containing all
430 * jnodes we are about to capture
432 check_jnodes(uf_coord->lh->node, key, count);
434 * assign fake block numbers to all jnodes, capture and mark them dirty
436 block = fake_blocknr_unformatted(count);
437 for (i = 0; i < count; i ++, block ++) {
438 node = jnodes[i];
439 spin_lock_jnode(node);
440 JF_SET(node, JNODE_CREATED);
441 jnode_set_block(node, &block);
442 result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0);
443 BUG_ON(result != 0);
444 jnode_make_dirty_locked(node);
445 spin_unlock_jnode(node);
449 * invalidate coordinate, research must be performed to continue
450 * because write will continue on twig level
452 uf_coord->valid = 0;
453 return count;
/**
 * plug_hole - replace one block of a hole extent with an unallocated extent
 * @uf_coord: coord of the hole unit, with extent extension filled in
 * @key: key of the block being plugged (debug-checked against @uf_coord)
 * @how: out-parameter recording which of the six cases was taken (debugging)
 *
 * Creates an unallocated extent of width 1 within a hole. Depending on
 * where pos_in_unit falls, this either converts a width-1 hole in place,
 * merges the block into an adjacent unallocated unit, or splits the hole
 * via reiser4_replace_extent(); in the worst case two additional extent
 * units are created. Returns 0 or an error from reiser4_replace_extent().
 */
466 static int plug_hole(uf_coord_t *uf_coord, const reiser4_key *key, int *how)
468 struct replace_handle rh;
469 reiser4_extent *ext;
470 reiser4_block_nr width, pos_in_unit;
471 coord_t *coord;
472 struct extent_coord_extension *ext_coord;
473 int return_inserted_position;
475 check_uf_coord(uf_coord, key);
477 rh.coord = coord_by_uf_coord(uf_coord);
478 rh.lh = uf_coord->lh;
479 rh.flags = 0;
481 coord = coord_by_uf_coord(uf_coord);
482 ext_coord = ext_coord_by_uf_coord(uf_coord);
483 ext = ext_by_ext_coord(uf_coord);
485 width = ext_coord->width;
486 pos_in_unit = ext_coord->pos_in_unit;
488 *how = 0;
489 if (width == 1) {
/* whole hole unit is exactly one block: convert it in place */
490 reiser4_set_extent(ext, UNALLOCATED_EXTENT_START, 1);
491 znode_make_dirty(coord->node);
492 /* update uf_coord */
493 ON_DEBUG(ext_coord->extent = *ext);
494 *how = 1;
495 return 0;
496 } else if (pos_in_unit == 0) {
497 /* we deal with first element of extent */
498 if (coord->unit_pos) {
499 /* there is an extent to the left */
500 if (state_of_extent(ext - 1) == UNALLOCATED_EXTENT) {
502 * left neighboring unit is an unallocated
503 * extent. Increase its width and decrease
504 * width of hole
506 extent_set_width(ext - 1,
507 extent_get_width(ext - 1) + 1);
508 extent_set_width(ext, width - 1);
509 znode_make_dirty(coord->node);
511 /* update coord extension */
512 coord->unit_pos--;
513 ext_coord->width = extent_get_width(ext - 1);
514 ext_coord->pos_in_unit = ext_coord->width - 1;
515 ext_coord->ext_offset -= sizeof(reiser4_extent);
516 ON_DEBUG(ext_coord->extent =
517 *extent_by_coord(coord));
518 *how = 2;
519 return 0;
522 /* extent for replace */
523 reiser4_set_extent(&rh.overwrite, UNALLOCATED_EXTENT_START, 1);
524 /* extent to be inserted */
525 reiser4_set_extent(&rh.new_extents[0], HOLE_EXTENT_START,
526 width - 1);
527 rh.nr_new_extents = 1;
529 /* have reiser4_replace_extent to return with @coord and
530 @uf_coord->lh set to unit which was replaced */
531 return_inserted_position = 0;
532 *how = 3;
533 } else if (pos_in_unit == width - 1) {
534 /* we deal with last element of extent */
535 if (coord->unit_pos < nr_units_extent(coord) - 1) {
536 /* there is an extent unit to the right */
537 if (state_of_extent(ext + 1) == UNALLOCATED_EXTENT) {
539 * right neighboring unit is an unallocated
540 * extent. Increase its width and decrease
541 * width of hole
543 extent_set_width(ext + 1,
544 extent_get_width(ext + 1) + 1);
545 extent_set_width(ext, width - 1);
546 znode_make_dirty(coord->node);
548 /* update coord extension */
549 coord->unit_pos++;
550 ext_coord->width = extent_get_width(ext + 1);
551 ext_coord->pos_in_unit = 0;
552 ext_coord->ext_offset += sizeof(reiser4_extent);
553 ON_DEBUG(ext_coord->extent =
554 *extent_by_coord(coord));
555 *how = 4;
556 return 0;
559 /* extent for replace */
560 reiser4_set_extent(&rh.overwrite, HOLE_EXTENT_START, width - 1);
561 /* extent to be inserted */
562 reiser4_set_extent(&rh.new_extents[0], UNALLOCATED_EXTENT_START,
564 rh.nr_new_extents = 1;
566 /* have reiser4_replace_extent to return with @coord and
567 @uf_coord->lh set to unit which was inserted */
568 return_inserted_position = 1;
569 *how = 5;
570 } else {
571 /* extent for replace */
572 reiser4_set_extent(&rh.overwrite, HOLE_EXTENT_START,
573 pos_in_unit);
574 /* extents to be inserted */
575 reiser4_set_extent(&rh.new_extents[0], UNALLOCATED_EXTENT_START,
577 reiser4_set_extent(&rh.new_extents[1], HOLE_EXTENT_START,
578 width - pos_in_unit - 1);
579 rh.nr_new_extents = 2;
581 /* have reiser4_replace_extent to return with @coord and
582 @uf_coord->lh set to first of units which were inserted */
583 return_inserted_position = 1;
584 *how = 6;
/* paste key: first byte past the overwritten (leading) part of the hole */
586 unit_key_by_coord(coord, &rh.paste_key);
587 set_key_offset(&rh.paste_key, get_key_offset(&rh.paste_key) +
588 extent_get_width(&rh.overwrite) * current_blocksize);
590 uf_coord->valid = 0;
591 return reiser4_replace_extent(&rh, return_inserted_position);
/**
 * overwrite_one_block - prepare one jnode for overwrite
 * @uf_coord: coord of the extent unit addressing the block
 * @key: key of the block
 * @node: jnode of the page being written
 * @hole_plugged: optional out-flag set to 1 when a hole was converted
 *
 * If @uf_coord points at a hole extent - create an unallocated extent for
 * it (plug_hole) and assign a fake block number. If it points at an
 * allocated extent - assign the on-disk block number to the jnode.
 * Returns 0, an error from plug_hole(), or -EIO on a corrupt extent.
 */
604 static int overwrite_one_block(uf_coord_t *uf_coord, const reiser4_key *key,
605 jnode *node, int *hole_plugged)
607 int result;
608 struct extent_coord_extension *ext_coord;
609 reiser4_extent *ext;
610 reiser4_block_nr block;
611 int how;
613 assert("vs-1312", uf_coord->coord.between == AT_UNIT);
615 result = 0;
616 ext_coord = ext_coord_by_uf_coord(uf_coord);
617 ext = ext_by_ext_coord(uf_coord);
618 assert("", state_of_extent(ext) != UNALLOCATED_EXTENT);
620 switch (state_of_extent(ext)) {
621 case ALLOCATED_EXTENT:
/* block already on disk: jnode gets the real block number */
622 block = extent_get_start(ext) + ext_coord->pos_in_unit;
623 break;
625 case HOLE_EXTENT:
/* charge quota for the single block being materialized */
626 result = vfs_dq_alloc_block_nodirty(mapping_jnode(node)->host,
628 BUG_ON(result != 0);
629 result = plug_hole(uf_coord, key, &how);
630 if (result)
631 return result;
632 block = fake_blocknr_unformatted(1);
633 if (hole_plugged)
634 *hole_plugged = 1;
635 JF_SET(node, JNODE_CREATED);
636 break;
638 default:
639 return RETERR(-EIO);
642 jnode_set_block(node, &block);
643 return 0;
/**
 * move_coord - move coordinate forward
 * @uf_coord: coord with extent extension to advance
 *
 * Move coordinate one data block pointer forward, first within the
 * current unit, then to the next unit of the item, keeping the cached
 * extension in sync. Return 1 if coord was already invalid or the end of
 * the item is reached (coord is invalidated); 0 otherwise.
 */
653 static int move_coord(uf_coord_t *uf_coord)
655 struct extent_coord_extension *ext_coord;
657 if (uf_coord->valid == 0)
658 return 1;
659 ext_coord = &uf_coord->extension.extent;
660 ext_coord->pos_in_unit ++;
661 if (ext_coord->pos_in_unit < ext_coord->width)
662 /* coordinate moved within the unit */
663 return 0;
665 /* end of unit is reached. Try to move to next unit */
666 ext_coord->pos_in_unit = 0;
667 uf_coord->coord.unit_pos ++;
668 if (uf_coord->coord.unit_pos < ext_coord->nr_units) {
669 /* coordinate moved to next unit */
670 ext_coord->ext_offset += sizeof(reiser4_extent);
671 ext_coord->width =
672 extent_get_width(ext_by_offset
673 (uf_coord->coord.node,
674 ext_coord->ext_offset));
675 ON_DEBUG(ext_coord->extent =
676 *ext_by_offset(uf_coord->coord.node,
677 ext_coord->ext_offset));
678 return 0;
680 /* end of item is reached */
681 uf_coord->valid = 0;
682 return 1;
/**
 * overwrite_extent - overwrite blocks addressed by an existing extent item
 * @uf_coord: coord of the first block to overwrite
 * @key: key of the first block
 * @jnodes: jnodes of the pages being written
 * @count: number of jnodes
 * @plugged_hole: optional out-flag, set when a hole block was converted
 *
 * Walks @count blocks forward from @uf_coord; for each jnode without a
 * block number, prepares it via overwrite_one_block(), then captures and
 * dirties the jnode. Stops early when the coord becomes invalid or the
 * item ends. Returns number of handled jnodes or an error code.
 */
691 static int overwrite_extent(uf_coord_t *uf_coord, const reiser4_key *key,
692 jnodes **jnodes, int count, int *plugged_hole)
694 int result;
695 reiser4_key k;
696 int i;
697 jnode *node;
699 k = *key;
700 for (i = 0; i < count; i ++) {
701 node = jnodes[i];
/* jnode without a block number needs hole plugging or block assignment */
702 if (*jnode_get_block(node) == 0) {
703 result = overwrite_one_block(uf_coord, &k, node, plugged_hole);
704 if (result)
705 return result;
708 * make sure that we hold long term locked twig node containing
709 * all jnodes we are about to capture
711 check_jnodes(uf_coord->lh->node, &k, 1);
713 * assign fake block numbers to all jnodes, capture and mark
714 * them dirty
716 spin_lock_jnode(node);
717 result = reiser4_try_capture(node, ZNODE_WRITE_LOCK, 0);
718 BUG_ON(result != 0);
719 jnode_make_dirty_locked(node);
720 spin_unlock_jnode(node);
722 if (uf_coord->valid == 0)
723 return i + 1;
725 check_uf_coord(uf_coord, &k);
727 if (move_coord(uf_coord)) {
729 * failed to move to the next node pointer. Either end
730 * of file or end of twig node is reached. In the later
731 * case we might go to the right neighbor.
733 uf_coord->valid = 0;
734 return i + 1;
736 set_key_offset(&k, get_key_offset(&k) + PAGE_CACHE_SIZE);
739 return count;
/**
 * reiser4_update_extent - update extent metadata for a single page
 * @inode: inode of file
 * @node: jnode of the page
 * @pos: file offset of the page
 * @plugged_hole: optional out-flag, set when a hole block was converted
 *
 * Single-jnode variant of update_extents(): looks the file item up by
 * key, then appends, overwrites, or inserts the first extent as needed.
 * Returns 0 on success (exactly one jnode handled) or an error code.
 */
750 int reiser4_update_extent(struct inode *inode, jnode *node, loff_t pos,
751 int *plugged_hole)
753 int result;
754 znode *loaded;
755 uf_coord_t uf_coord;
756 coord_t *coord;
757 lock_handle lh;
758 reiser4_key key;
760 assert("", reiser4_lock_counters()->d_refs == 0);
762 key_by_inode_and_offset_common(inode, pos, &key);
764 init_uf_coord(&uf_coord, &lh);
765 coord = &uf_coord.coord;
766 result = find_file_item_nohint(coord, &lh, &key,
767 ZNODE_WRITE_LOCK, inode);
768 if (IS_CBKERR(result)) {
769 assert("", reiser4_lock_counters()->d_refs == 0);
770 return result;
773 result = zload(coord->node);
774 BUG_ON(result != 0);
775 loaded = coord->node;
777 if (coord->between == AFTER_UNIT) {
779 * append existing extent item with unallocated extent of width
780 * nr_jnodes
782 init_coord_extension_extent(&uf_coord,
783 get_key_offset(&key));
784 result = append_last_extent(&uf_coord, &key,
785 &node, 1);
786 } else if (coord->between == AT_UNIT) {
788 * overwrite
789 * not optimal yet. Will be optimized if new write will show
790 * performance win.
792 init_coord_extension_extent(&uf_coord,
793 get_key_offset(&key));
794 result = overwrite_extent(&uf_coord, &key,
795 &node, 1, plugged_hole);
796 } else {
798 * there are no items of this file in the tree yet. Create
799 * first item of the file inserting one unallocated extent of
800 * width nr_jnodes
802 result = insert_first_extent(&uf_coord, &key, &node, 1, inode);
/* the helpers return number of handled jnodes (here: 1) or an error */
804 assert("", result == 1 || result < 0);
805 zrelse(loaded);
806 done_lh(&lh);
807 assert("", reiser4_lock_counters()->d_refs == 0);
808 return (result == 1) ? 0 : result;
/**
 * update_extents - update extent metadata for a batch of pages
 * @file: file being written (source of the search hint)
 * @inode: inode of file
 * @jnodes: jnodes of the pages written
 * @count: number of jnodes; 0 means expanding truncate
 * @pos: file offset (used only when @count == 0)
 *
 * Repeatedly looks the file position up (hint-assisted) and dispatches to
 * append_last_extent/overwrite_extent/insert_first_extent until all
 * @count jnodes have extent pointers. Returns the last helper result
 * (number of jnodes handled by the final iteration, or a negative error).
 */
819 static int update_extents(struct file *file, struct inode *inode,
820 jnode **jnodes, int count, loff_t pos)
822 struct hint hint;
823 reiser4_key key;
824 int result;
825 znode *loaded;
827 result = load_file_hint(file, &hint);
828 BUG_ON(result != 0);
830 if (count != 0)
832 * count == 0 is special case: expanding truncate
834 pos = (loff_t)index_jnode(jnodes[0]) << PAGE_CACHE_SHIFT;
835 key_by_inode_and_offset_common(inode, pos, &key);
837 assert("", reiser4_lock_counters()->d_refs == 0);
839 do {
840 result = find_file_item(&hint, &key, ZNODE_WRITE_LOCK, inode);
841 if (IS_CBKERR(result)) {
842 assert("", reiser4_lock_counters()->d_refs == 0);
843 return result;
846 result = zload(hint.ext_coord.coord.node);
847 BUG_ON(result != 0);
848 loaded = hint.ext_coord.coord.node;
850 if (hint.ext_coord.coord.between == AFTER_UNIT) {
852 * append existing extent item with unallocated extent
853 * of width nr_jnodes
855 if (hint.ext_coord.valid == 0)
856 /* NOTE: get statistics on this */
857 init_coord_extension_extent(&hint.ext_coord,
858 get_key_offset(&key));
859 result = append_last_extent(&hint.ext_coord, &key,
860 jnodes, count);
861 } else if (hint.ext_coord.coord.between == AT_UNIT) {
863 * overwrite
864 * not optimal yet. Will be optimized if new write will
865 * show performance win.
867 if (hint.ext_coord.valid == 0)
868 /* NOTE: get statistics on this */
869 init_coord_extension_extent(&hint.ext_coord,
870 get_key_offset(&key));
871 result = overwrite_extent(&hint.ext_coord, &key,
872 jnodes, count, NULL);
873 } else {
875 * there are no items of this file in the tree
876 * yet. Create first item of the file inserting one
877 * unallocated extent of * width nr_jnodes
879 result = insert_first_extent(&hint.ext_coord, &key,
880 jnodes, count, inode);
882 zrelse(loaded);
883 if (result < 0) {
884 done_lh(hint.ext_coord.lh);
885 break;
/* advance past the jnodes the helper consumed */
888 jnodes += result;
889 count -= result;
890 set_key_offset(&key, get_key_offset(&key) + result * PAGE_CACHE_SIZE);
892 /* seal and unlock znode */
893 if (hint.ext_coord.valid)
894 reiser4_set_hint(&hint, &key, ZNODE_WRITE_LOCK);
895 else
896 reiser4_unset_hint(&hint);
898 } while (count > 0);
900 save_file_hint(file, &hint);
901 assert("", reiser4_lock_counters()->d_refs == 0);
902 return result;
/**
 * write_extent_reserve_space - reserve space for extent write operation
 * @inode: inode of file being written
 *
 * Estimates and reserves (grabs) disk space which may be required for
 * writing WRITE_GRANULARITY pages of the file. Returns the result of
 * reiser4_grab_space() (0 on success, negative on failure).
 */
912 static int write_extent_reserve_space(struct inode *inode)
914 __u64 count;
915 reiser4_tree *tree;
918 * to write WRITE_GRANULARITY pages to a file by extents we have to
919 * reserve disk space for:
921 * 1. find_file_item may have to insert empty node to the tree (empty
922 * leaf node between two extent items). This requires 1 block and
923 * number of blocks which are necessary to perform insertion of an
924 * internal item into twig level.
926 * 2. for each of written pages there might be needed 1 block and
927 * number of blocks which might be necessary to perform insertion of or
928 * paste to an extent item.
930 * 3. stat data update
932 tree = reiser4_tree_by_inode(inode);
933 count = estimate_one_insert_item(tree) +
934 WRITE_GRANULARITY * (1 + estimate_one_insert_into_item(tree)) +
935 estimate_one_insert_item(tree);
936 grab_space_enable();
937 return reiser4_grab_space(count, 0 /* flags */);
/*
 * filemap_copy_from_user no longer exists in generic code, because it
 * is deadlocky (copying from user while holding the page lock is bad).
 * As a temporary fix for reiser4, just define it here.
 *
 * Tries a fast atomic copy first (cannot sleep, may fault partway), and
 * falls back to a sleeping kmap copy when the atomic copy was short.
 * Returns the number of bytes actually copied.
 */
945 static inline size_t
946 filemap_copy_from_user(struct page *page, unsigned long offset,
947 const char __user *buf, unsigned bytes)
949 char *kaddr;
950 int left;
952 kaddr = kmap_atomic(page, KM_USER0);
953 left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
954 kunmap_atomic(kaddr, KM_USER0);
956 if (left != 0) {
957 /* Do it the slow way */
958 kaddr = kmap(page);
959 left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
960 kunmap(page);
962 return bytes - left;
/**
 * reiser4_write_extent - write method of extent item plugin
 * @file: file to write to
 * @inode: inode of the file
 * @buf: address of user-space buffer
 * @count: number of bytes to write
 * @pos: position in file to write to
 *
 * Reserves space, pins up to WRITE_GRANULARITY+1 pages with their jnodes,
 * copies user data into them, and then either updates extent metadata
 * (for pages without block numbers) or just captures/dirties the jnodes.
 * Returns number of bytes written, or an error when nothing was written.
 */
973 ssize_t reiser4_write_extent(struct file *file, struct inode * inode,
974 const char __user *buf, size_t count, loff_t *pos)
976 int have_to_update_extent;
977 int nr_pages, nr_dirty;
978 struct page *page;
979 jnode *jnodes[WRITE_GRANULARITY + 1];
980 unsigned long index;
981 unsigned long end;
982 int i;
983 int to_page, page_off;
984 size_t left, written;
985 int result = 0;
987 if (write_extent_reserve_space(inode))
988 return RETERR(-ENOSPC);
990 if (count == 0) {
991 /* truncate case */
992 update_extents(file, inode, jnodes, 0, *pos);
993 return 0;
996 BUG_ON(get_current_context()->trans->atom != NULL);
998 left = count;
999 index = *pos >> PAGE_CACHE_SHIFT;
1000 /* calculate number of pages which are to be written */
1001 end = ((*pos + count - 1) >> PAGE_CACHE_SHIFT);
1002 nr_pages = end - index + 1;
1003 nr_dirty = 0;
1004 assert("", nr_pages <= WRITE_GRANULARITY + 1);
1006 /* get pages and jnodes */
1007 for (i = 0; i < nr_pages; i ++) {
1008 page = find_or_create_page(inode->i_mapping, index + i,
1009 reiser4_ctx_gfp_mask_get());
1010 if (page == NULL) {
1011 nr_pages = i;
1012 result = RETERR(-ENOMEM);
1013 goto out;
1016 jnodes[i] = jnode_of_page(page);
1017 if (IS_ERR(jnodes[i])) {
1018 unlock_page(page);
1019 page_cache_release(page);
1020 nr_pages = i;
1021 result = RETERR(-ENOMEM);
1022 goto out;
1024 /* prevent jnode and page from disconnecting */
1025 JF_SET(jnodes[i], JNODE_WRITE_PREPARED);
1026 unlock_page(page);
1029 BUG_ON(get_current_context()->trans->atom != NULL);
1031 have_to_update_extent = 0;
1033 page_off = (*pos & (PAGE_CACHE_SIZE - 1));
1034 for (i = 0; i < nr_pages; i ++) {
1035 to_page = PAGE_CACHE_SIZE - page_off;
1036 if (to_page > left)
1037 to_page = left;
1038 page = jnode_page(jnodes[i]);
/* partial write within an existing page: read it in first */
1039 if (page_offset(page) < inode->i_size &&
1040 !PageUptodate(page) && to_page != PAGE_CACHE_SIZE) {
1042 * the above is not optimal for partial write to last
1043 * page of file when file size is not at boundary of
1044 * page
1046 lock_page(page);
1047 if (!PageUptodate(page)) {
1048 result = readpage_unix_file(NULL, page);
1049 BUG_ON(result != 0);
1050 /* wait for read completion */
1051 lock_page(page);
1052 BUG_ON(!PageUptodate(page));
1053 } else
1054 result = 0;
1055 unlock_page(page);
1058 BUG_ON(get_current_context()->trans->atom != NULL);
/* prefault user buffer before taking the page lock */
1059 fault_in_pages_readable(buf, to_page);
1060 BUG_ON(get_current_context()->trans->atom != NULL);
1062 lock_page(page);
1063 if (!PageUptodate(page) && to_page != PAGE_CACHE_SIZE)
1064 zero_user_segments(page, 0, page_off,
1065 page_off + to_page,
1066 PAGE_CACHE_SIZE);
1068 written = filemap_copy_from_user(page, page_off, buf, to_page);
1069 if (unlikely(written != to_page)) {
1070 unlock_page(page);
1071 result = RETERR(-EFAULT);
1072 break;
1075 flush_dcache_page(page);
1076 set_page_dirty_notag(page);
1077 unlock_page(page);
1078 nr_dirty++;
1080 mark_page_accessed(page);
1081 SetPageUptodate(page);
/* a jnode with no block number yet needs extent metadata update */
1083 if (jnodes[i]->blocknr == 0)
1084 have_to_update_extent ++;
1086 page_off = 0;
1087 buf += to_page;
1088 left -= to_page;
1089 BUG_ON(get_current_context()->trans->atom != NULL);
1092 if (have_to_update_extent) {
1093 update_extents(file, inode, jnodes, nr_dirty, *pos);
1094 } else {
1095 for (i = 0; i < nr_dirty; i ++) {
1096 int ret;
1097 spin_lock_jnode(jnodes[i]);
1098 ret = reiser4_try_capture(jnodes[i],
1099 ZNODE_WRITE_LOCK, 0);
1100 BUG_ON(ret != 0);
1101 jnode_make_dirty_locked(jnodes[i]);
1102 spin_unlock_jnode(jnodes[i]);
1105 out:
1106 for (i = 0; i < nr_pages; i ++) {
1107 page_cache_release(jnode_page(jnodes[i]));
1108 JF_CLR(jnodes[i], JNODE_WRITE_PREPARED);
1109 jput(jnodes[i]);
1112 /* the only errors handled so far is ENOMEM and
1113 EFAULT on copy_from_user */
1115 return (count - left) ? (count - left) : result;
/**
 * reiser4_do_readpage_extent - read one page addressed by an extent unit
 * @ext: extent unit addressing the page
 * @pos: block position of the page within @ext
 * @page: locked page to fill
 *
 * For a hole: zero-fill the page (unless an eflushed jnode carries a real
 * block). For allocated/unallocated extents: attach or create the jnode
 * and submit the read via reiser4_page_io(). Returns 0 or an error code;
 * on the zero-fill paths the page is unlocked before returning.
 */
1118 int reiser4_do_readpage_extent(reiser4_extent * ext, reiser4_block_nr pos,
1119 struct page *page)
1121 jnode *j;
1122 struct address_space *mapping;
1123 unsigned long index;
1124 oid_t oid;
1125 reiser4_block_nr block;
1127 mapping = page->mapping;
1128 oid = get_inode_oid(mapping->host);
1129 index = page->index;
1131 switch (state_of_extent(ext)) {
1132 case HOLE_EXTENT:
1134 * it is possible to have hole page with jnode, if page was
1135 * eflushed previously.
1137 j = jfind(mapping, index);
1138 if (j == NULL) {
1139 zero_user(page, 0, PAGE_CACHE_SIZE);
1140 SetPageUptodate(page);
1141 unlock_page(page);
1142 return 0;
1144 spin_lock_jnode(j);
1145 if (!jnode_page(j)) {
1146 jnode_attach_page(j, page);
1147 } else {
1148 BUG_ON(jnode_page(j) != page);
1149 assert("vs-1504", jnode_page(j) == page);
1151 block = *jnode_get_io_block(j);
1152 spin_unlock_jnode(j);
/* eflushed jnode with no I/O block: the hole page is just zeroes */
1153 if (block == 0) {
1154 zero_user(page, 0, PAGE_CACHE_SIZE);
1155 SetPageUptodate(page);
1156 unlock_page(page);
1157 jput(j);
1158 return 0;
1160 break;
1162 case ALLOCATED_EXTENT:
1163 j = jnode_of_page(page);
1164 if (IS_ERR(j))
1165 return PTR_ERR(j);
1166 if (*jnode_get_block(j) == 0) {
1167 reiser4_block_nr blocknr;
1169 blocknr = extent_get_start(ext) + pos;
1170 jnode_set_block(j, &blocknr);
1171 } else
1172 assert("vs-1403",
1173 j->blocknr == extent_get_start(ext) + pos);
1174 break;
1176 case UNALLOCATED_EXTENT:
1177 j = jfind(mapping, index);
1178 assert("nikita-2688", j);
1179 assert("vs-1426", jnode_page(j) == NULL);
1181 spin_lock_jnode(j);
1182 jnode_attach_page(j, page);
1183 spin_unlock_jnode(j);
1184 break;
1186 default:
1187 warning("vs-957", "wrong extent\n");
1188 return RETERR(-EIO);
1191 BUG_ON(j == 0);
1192 reiser4_page_io(page, j, READ, reiser4_ctx_gfp_mask_get());
1193 jput(j);
1194 return 0;
1197 /* Implements plugin->u.item.s.file.read operation for extent items. */
/*
 * Copies flow->length bytes starting at the offset of flow->key from the
 * page cache of @file into the user buffer flow->data, reading pages in
 * via read_mapping_page() as needed. Entered with the twig node read
 * locked through hint->ext_coord; that lock is converted into a seal
 * before the copy loop starts. Returns 0 on success or a negative error
 * code.
 */
1198 int reiser4_read_extent(struct file *file, flow_t *flow, hint_t *hint)
1200 int result;
1201 struct page *page;
1202 unsigned long cur_page, next_page;
1203 unsigned long page_off, count;
1204 struct address_space *mapping;
1205 loff_t file_off;
1206 uf_coord_t *uf_coord;
1207 coord_t *coord;
1208 struct extent_coord_extension *ext_coord;
1209 unsigned long nr_pages;
1210 char *kaddr;
1212 assert("vs-1353", current_blocksize == PAGE_CACHE_SIZE);
1213 assert("vs-572", flow->user == 1);
1214 assert("vs-1351", flow->length > 0);
1216 uf_coord = &hint->ext_coord;
1218 check_uf_coord(uf_coord, NULL);
1219 assert("vs-33", uf_coord->lh == &hint->lh);
1221 coord = &uf_coord->coord;
1222 assert("vs-1119", znode_is_rlocked(coord->node));
1223 assert("vs-1120", znode_is_loaded(coord->node));
1224 assert("vs-1256", coord_matches_key_extent(coord, &flow->key));
1226 mapping = file->f_dentry->d_inode->i_mapping;
1227 ext_coord = &uf_coord->extension.extent;
1229 /* offset in a file to start read from */
1230 file_off = get_key_offset(&flow->key);
1231 /* offset within the page to start read from */
1232 page_off = (unsigned long)(file_off & (PAGE_CACHE_SIZE - 1));
1233 /* bytes which can be read from the page which contains file_off */
1234 count = PAGE_CACHE_SIZE - page_off;
1236 /* index of page containing offset read is to start from */
1237 cur_page = (unsigned long)(file_off >> PAGE_CACHE_SHIFT);
1238 next_page = cur_page;
1239 /* number of pages flow spans over */
1240 nr_pages =
1241 ((file_off + flow->length + PAGE_CACHE_SIZE -
1242 1) >> PAGE_CACHE_SHIFT) - cur_page;
1244 /* we start having twig node read locked. However, we do not want to
1245 keep that lock all the time readahead works. So, set a seal and
1246 release the twig node. */
1247 reiser4_set_hint(hint, &flow->key, ZNODE_READ_LOCK);
1248 /* &hint->lh is done-ed */
1250 do {
1251 reiser4_txn_restart_current();
1252 page = read_mapping_page(mapping, cur_page, file);
1253 if (IS_ERR(page))
1254 return PTR_ERR(page);
1255 lock_page(page);
1256 if (!PageUptodate(page)) {
1257 unlock_page(page);
1258 page_cache_release(page);
1259 warning("jmacd-97178", "extent_read: page is not up to date");
1260 return RETERR(-EIO);
1262 mark_page_accessed(page);
1263 unlock_page(page);
1265 /* If users can be writing to this page using arbitrary virtual
1266 addresses, take care about potential aliasing before reading
1267 the page on the kernel side.
1269 if (mapping_writably_mapped(mapping))
1270 flush_dcache_page(page);
1272 assert("nikita-3034", reiser4_schedulable());
1274 /* number of bytes which are to be read from the page */
1275 if (count > flow->length)
1276 count = flow->length;
/* prefault the user buffer so the atomic copy below usually succeeds */
1278 result = fault_in_pages_writeable(flow->data, count);
1279 if (result) {
1280 page_cache_release(page);
1281 return RETERR(-EFAULT);
/* fast path: copy with the page mapped atomically (no sleeping) */
1284 kaddr = kmap_atomic(page, KM_USER0);
1285 result = __copy_to_user_inatomic(flow->data,
1286 kaddr + page_off, count);
1287 kunmap_atomic(kaddr, KM_USER0);
1288 if (result != 0) {
/* atomic copy faulted; retry with a sleeping kmap/copy */
1289 kaddr = kmap(page);
1290 result = __copy_to_user(flow->data, kaddr + page_off, count);
1291 kunmap(page);
1292 if (unlikely(result))
/* NOTE(review): this return path appears to skip
   page_cache_release(page) - possible page reference
   leak; confirm against the full source */
1293 return RETERR(-EFAULT);
1296 page_cache_release(page);
1298 /* increase key (flow->key), update user area pointer (flow->data) */
1299 move_flow_forward(flow, count);
/* subsequent pages are read from their beginning */
1301 page_off = 0;
1302 cur_page ++;
1303 count = PAGE_CACHE_SIZE;
1304 nr_pages--;
1305 } while (flow->length);
1307 return 0;
1311 plugin->s.file.readpage
1312 reiser4_read->unix_file_read->page_cache_readahead->reiser4_readpage->unix_file_readpage->extent_readpage
1314 filemap_fault->reiser4_readpage->readpage_unix_file->readpage_extent
1316 At the beginning: coord->node is read locked, zloaded, the page is
1317 locked, and coord is set to an existing unit inside the extent item (coord does not necessarily match page->index)
/*
 * Thin wrapper over reiser4_do_readpage_extent(): validates the extended
 * coord passed through @vp (debug builds only) and forwards the extent,
 * the position inside the unit and the page.
 */
1319 int reiser4_readpage_extent(void *vp, struct page *page)
1321 uf_coord_t *uf_coord = vp;
1322 ON_DEBUG(coord_t * coord = &uf_coord->coord);
1323 ON_DEBUG(reiser4_key key);
1325 assert("vs-1040", PageLocked(page));
1326 assert("vs-1050", !PageUptodate(page));
1327 assert("vs-1039", page->mapping && page->mapping->host);
1329 assert("vs-1044", znode_is_loaded(coord->node));
1330 assert("vs-758", item_is_extent(coord));
1331 assert("vs-1046", coord_is_existing_unit(coord));
1332 assert("vs-1045", znode_is_rlocked(coord->node));
1333 assert("vs-1047",
1334 page->mapping->host->i_ino ==
1335 get_key_objectid(item_key_by_coord(coord, &key)));
1336 check_uf_coord(uf_coord, NULL);
1338 return reiser4_do_readpage_extent(
1339 ext_by_ext_coord(uf_coord),
1340 uf_coord->extension.extent.pos_in_unit, page);
1344 * get_block_address_extent
1345 * @coord: coord set to an existing extent unit
1346 * @block: logical file block whose disk address is wanted
1347 * @result: filled with the disk block number, or 0 for a
 *          non-allocated extent
 *
 * Translates a logical block number into the on-disk block number using
 * the allocated extent at @coord. Returns 0 on success, -EINVAL if
 * @coord does not point at an existing unit.
1351 int get_block_address_extent(const coord_t *coord, sector_t block,
1352 sector_t *result)
1354 reiser4_extent *ext;
1356 if (!coord_is_existing_unit(coord))
1357 return RETERR(-EINVAL);
1359 ext = extent_by_coord(coord);
1361 if (state_of_extent(ext) != ALLOCATED_EXTENT)
1362 /* FIXME: bad things may happen if it is unallocated extent */
1363 *result = 0;
1364 else {
1365 reiser4_key key;
1367 unit_key_by_coord(coord, &key);
1368 assert("vs-1645",
1369 block >= get_key_offset(&key) >> current_blocksize_bits);
1370 assert("vs-1646",
1371 block <
1372 (get_key_offset(&key) >> current_blocksize_bits) +
1373 extent_get_width(ext));
/* disk block = extent start + offset of @block inside the unit */
1374 *result =
1375 extent_get_start(ext) + (block -
1376 (get_key_offset(&key) >>
1377 current_blocksize_bits));
1379 return 0;
1383 plugin->u.item.s.file.append_key
1384 Returns the key of the first byte following the last byte addressed by
 this extent item, i.e. the item key with its offset advanced by the
 total size of the item's extents.
1386 reiser4_key *append_key_extent(const coord_t * coord, reiser4_key * key)
1388 item_key_by_coord(coord, key);
1389 set_key_offset(key,
1390 get_key_offset(key) + reiser4_extent_size(coord,
1391 nr_units_extent
1392 (coord)));
/* the resulting offset must be non-zero and block aligned */
1394 assert("vs-610", get_key_offset(key)
1395 && (get_key_offset(key) & (current_blocksize - 1)) == 0);
1396 return key;
1399 /* plugin->u.item.s.file.init_coord_extension */
/*
 * Fills in the extent part of @uf_coord's extension (nr_units,
 * ext_offset, width, pos_in_unit) for the unit @coord points at, and
 * marks the extended coord valid. @lookuped is the file offset the
 * lookup was performed for; it determines pos_in_unit. Coords that are
 * not AT_UNIT/AFTER_UNIT are left untouched (uf_coord stays invalid).
 */
1400 void init_coord_extension_extent(uf_coord_t * uf_coord, loff_t lookuped)
1402 coord_t *coord;
1403 struct extent_coord_extension *ext_coord;
1404 reiser4_key key;
1405 loff_t offset;
1407 assert("vs-1295", uf_coord->valid == 0);
1409 coord = &uf_coord->coord;
1410 assert("vs-1288", coord_is_iplug_set(coord));
1411 assert("vs-1327", znode_is_loaded(coord->node));
1413 if (coord->between != AFTER_UNIT && coord->between != AT_UNIT)
1414 return;
1416 ext_coord = &uf_coord->extension.extent;
1417 ext_coord->nr_units = nr_units_extent(coord);
/* cache the extent's byte offset inside the node's data area */
1418 ext_coord->ext_offset =
1419 (char *)extent_by_coord(coord) - zdata(coord->node);
1420 ext_coord->width = extent_get_width(extent_by_coord(coord));
1421 ON_DEBUG(ext_coord->extent = *extent_by_coord(coord));
1422 uf_coord->valid = 1;
1424 /* pos_in_unit is the only uninitialized field in extended coord */
1425 if (coord->between == AFTER_UNIT) {
/* past the last unit: position at the last block of the unit */
1426 assert("vs-1330",
1427 coord->unit_pos == nr_units_extent(coord) - 1);
1429 ext_coord->pos_in_unit = ext_coord->width - 1;
1430 } else {
1431 /* AT_UNIT */
1432 unit_key_by_coord(coord, &key);
1433 offset = get_key_offset(&key);
1435 assert("vs-1328", offset <= lookuped);
1436 assert("vs-1329",
1437 lookuped <
1438 offset + ext_coord->width * current_blocksize);
/* block index of @lookuped relative to the unit's start */
1439 ext_coord->pos_in_unit =
1440 ((lookuped - offset) >> current_blocksize_bits);
1445 * Local variables:
1446 * c-indentation-style: "K&R"
1447 * mode-name: "LC"
1448 * c-basic-offset: 8
1449 * tab-width: 8
1450 * fill-column: 79
1451 * scroll-step: 1
1452 * End: