/* fs/btrfs/file.c */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};
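
/*
 * Defrag records live in an rbtree keyed by (root objectid, inode number),
 * compared below, so all records for one subvolume sort together.
 */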
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/* pop a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(root))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, the new in-memory
		 * inode doesn't have the IN_DEFRAG flag set.  In that case
		 * we may find an existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object.  If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (!__need_auto_defrag(root))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to
	 * merge them together.
	 */
	spin_lock(&root->fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * pick the defraggable inode that we want; if it doesn't exist, we will
 * get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

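/*
 * Drop all queued defrag records, e.g. when the filesystem is being
 * unmounted and no more defrag passes will run.
 */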
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		if (need_resched()) {
			spin_unlock(&fs_info->defrag_inodes_lock);
			cond_resched();
			spin_lock(&fs_info->defrag_inodes_lock);
		}

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

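/* number of extents to defrag in one pass before the inode is requeued */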
#define BTRFS_DEFRAG_BATCH	1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	key.objectid = defrag->ino;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info->tree_root))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_write_iter to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty; clear it here.  There should be no
		 * need to mark the pages accessed as prepare_pages should
		 * have marked them accessed in prepare_pages via
		 * find_or_create_page()
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
							     + diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				ret = add_extent_mapping(em_tree, split,
							 modified);
				ASSERT(ret == 0); /* Logic error */
			}
			free_extent_map(split);
			split = NULL;
		}
next:
		if (extent_map_in_tree(em))
			remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
		modify_tree = 0;

	update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
		       root == root->fs_info->tree_root);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf,
						     path->slots[0], fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (bug) if while punching holes we hit
		 * -ENOSPC condition. So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start)
			goto delete_extent_item;

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to the first slot, so that after the
		 * delete, if items are moved off from our leaf to its
		 * immediate left or right neighbor leafs, we end up with a
		 * correct and adjusted path->slots[0] for our insertion
		 * (if replace_extent != 0).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && replace_extent && leafs_visited == 1 &&
	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
	     path->locks[0] == BTRFS_WRITE_LOCK) &&
	    btrfs_leaf_free_space(root, leaf) >=
	    sizeof(struct btrfs_item) + extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key,
				       &extent_item_size,
				       extent_item_size,
				       sizeof(struct btrfs_item) +
				       extent_item_size, 1);
		*key_inserted = 1;
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	return ret;
}

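/*
 * Thin wrapper around __btrfs_drop_extents() for callers that don't need
 * the drop_end result or the replace-extent optimization; it allocates and
 * frees the path itself.
 */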
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}

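/*
 * Helper for btrfs_mark_extent_written(): check whether the file extent item
 * at @slot points into the same on-disk extent (same disk bytenr and
 * contiguous logical offset, no compression or encryption), i.e. whether the
 * extents can be merged, and report its [start, end) range.
 */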
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 1);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}

/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other < 0 value - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
	last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;

	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos, 0, cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos,
					     cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				  0, 0, cached_state, GFP_NOFS);
		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}

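/*
 * Check whether the range starting at @pos can be written in place without
 * COW.  On success *write_bytes is trimmed to the part of the range covered
 * by the existing extent; returns 0 if COW is required and -ENOSPC if nocow
 * writes are currently blocked.
 */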
static noinline int check_can_nocow(struct inode *inode, loff_t pos,
				    size_t *write_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_extent *ordered;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	ret = btrfs_start_nocow_write(root);
	if (!ret)
		return -ENOSPC;

	lockstart = round_down(pos, root->sectorsize);
	lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;

	while (1) {
		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered) {
			break;
		}
		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	num_bytes = lockend - lockstart + 1;
	ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
	if (ret <= 0) {
		ret = 0;
		btrfs_end_nocow_write(root);
	} else {
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	}

	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);

	return ret;
}

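/*
 * The buffered write loop: reserve data and metadata space, prepare and lock
 * the page cache pages, copy the user data into them and mark the range
 * delalloc, releasing any over-reservation when a copy comes up short.
 */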
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	struct extent_state *cached_state = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	bool need_unlock;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
		ret = btrfs_check_data_free_space(inode, reserve_bytes);
		if (ret == -ENOSPC &&
		    (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
					      BTRFS_INODE_PREALLOC))) {
			ret = check_can_nocow(inode, pos, &write_bytes);
			if (ret > 0) {
				only_release_metadata = true;
				/*
				 * our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = (write_bytes + offset +
					     PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;
				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
				ret = 0;
			} else {
				ret = -ENOSPC;
			}
		}

		if (ret)
			break;

		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(inode,
							       reserve_bytes);
			else
				btrfs_end_nocow_write(root);
			break;
		}

		release_bytes = reserve_bytes;
		need_unlock = false;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret)
			break;

		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
						      pos, &lockstart, &lockend,
						      &cached_state);
		if (ret < 0) {
			if (ret == -EAGAIN)
				goto again;
			break;
		} else if (ret > 0) {
			need_unlock = true;
			ret = 0;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess
		 * delalloc bytes we reserved.  We need to increment
		 * outstanding_extents because btrfs_delalloc_release_space
		 * will decrement it, but we still have an outstanding extent
		 * for the chunk we actually managed to copy.
		 */
		if (num_pages > dirty_pages) {
			release_bytes = (num_pages - dirty_pages) <<
				PAGE_CACHE_SHIFT;
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			if (only_release_metadata)
				btrfs_delalloc_release_metadata(inode,
								release_bytes);
			else
				btrfs_delalloc_release_space(inode,
							     release_bytes);
		}

		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;

		if (copied > 0)
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
		if (need_unlock)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state,
					     GFP_NOFS);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_end_nocow_write(root);

		if (only_release_metadata && copied > 0) {
			u64 lockstart = round_down(pos, root->sectorsize);
			u64 lockend = lockstart +
				(dirty_pages << PAGE_CACHE_SHIFT) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
			only_release_metadata = false;
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_end_nocow_write(root);
			btrfs_delalloc_release_metadata(inode, release_bytes);
		} else {
			btrfs_delalloc_release_space(inode, release_bytes);
		}
	}

	return num_written ? num_written : ret;
}

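/*
 * O_DIRECT writes: do the direct IO first and, if it only partially
 * completed, finish the remainder with a buffered write, then flush and
 * invalidate that part of the page cache so the two views stay coherent.
 */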
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    struct iov_iter *from,
				    loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, from, pos);

	if (written < 0 || !iov_iter_count(from))
		return written;

	pos += written;
	written_buffered = __btrfs_buffered_write(file, from, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}

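/*
 * Bump mtime/ctime (and i_version) in memory only; the inode item itself is
 * updated later, when the write has finished (see the comment in
 * btrfs_file_write_iter() below).
 */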
static void update_time_for_write(struct inode *inode)
{
	struct timespec now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
				     struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start_pos;
	u64 end_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count = iov_iter_count(from);
	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
	loff_t pos = iocb->ki_pos;

	mutex_lock(&inode->i_mutex);

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	iov_iter_truncate(from, count);

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there.  We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		/* Expand hole size to cover write data, preventing empty gap */
		end_pos = round_up(pos + count, root->sectorsize);
		err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, from, pos);
	} else {
		num_written = __btrfs_buffered_write(file, from, pos);
		if (num_written > 0)
			iocb->ki_pos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	if (num_written > 0) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0)
			num_written = err;
	}

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

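/*
 * ->release file operation: end any transaction started via the btrfs trans
 * ioctl and, if the file was truncated in place, kick off writeback of the
 * newly written data.
 */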
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_log_ctx ctx;
	int ret = 0;
	bool full_sync = 0;
	u64 len;

	/*
	 * The range length can be represented by u64, we have to do the
	 * typecasts to avoid signed overflow if it's [0, LLONG_MAX],
	 * eg. from fsync()
	 */
	len = (u64)end - (u64)start + 1;
	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex.  That way multiple tasks can flush dirty
	 * pages concurrently, which improves performance.  See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	/*
	 * We flush the dirty pages again to avoid some dirty pages in the
	 * range being left.
	 */
	atomic_inc(&root->log_batch);
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags);
	if (full_sync) {
		ret = btrfs_wait_ordered_range(inode, start, len);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}
	atomic_inc(&root->log_batch);

	/*
	 * If the last transaction that changed this file was before the
	 * current transaction and we have the full sync flag set in our
	 * inode, we can bail out now without any syncing.
	 *
	 * Note that we can't bail out if the full sync flag isn't set. This is
	 * because when the full sync flag is set we start all ordered extents
	 * and wait for them to fully complete - when they complete they update
	 * the inode's last_trans field through:
	 *
	 *     btrfs_finish_ordered_io() ->
	 *         btrfs_update_inode_fallback() ->
	 *             btrfs_update_inode() ->
	 *                 btrfs_set_inode_last_trans()
	 *
	 * So we are sure that last_trans is up to date and can do this check to
	 * bail out safely. For the fast path, when the full sync flag is not
	 * set in our inode, we can not do it because we start only our ordered
	 * extents and don't wait for them to complete (that is when
	 * btrfs_finish_ordered_io runs), so here at this point their last_trans
	 * value might be less than or equal to fs_info->last_trans_committed,
	 * and setting a speculative last_trans for an inode when a buffered
	 * write is made (such as fs_info->generation + 1 for example) would not
	 * be reliable since after setting the value and before fsync is called
	 * any number of transactions can start and commit (transaction kthread
	 * commits the current transaction periodically), and a transaction
	 * commit does not start nor waits for ordered extents to complete.
	 */
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    (full_sync && BTRFS_I(inode)->last_trans <=
	     root->fs_info->last_trans_committed)) {
		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok, we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	/*
	 * We use start here because we will need to wait on the IO to complete
	 * in btrfs_sync_log, which could require joining a transaction (for
	 * example checking cross references in the nocow path).  If we use join
	 * here we could get into a situation where we're waiting on IO to
	 * happen that is blocked on a transaction trying to commit.  With start
	 * we inc the extwriter counter, so we wait for all extwriters to exit
	 * before we start blocking join'ers.  This comment is to keep somebody
	 * from thinking they are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	trans->sync = true;

	btrfs_init_log_ctx(&ctx);

	ret = btrfs_log_dentry_safe(trans, root, dentry, &ctx);
	if (ret < 0) {
		/* Fallthrough and commit/free transaction. */
		ret = 1;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (!ret) {
			ret = btrfs_sync_log(trans, root, &ctx);
			if (!ret) {
				ret = btrfs_end_transaction(trans, root);
				goto out;
			}
		}
		if (!full_sync) {
			ret = btrfs_wait_ordered_range(inode, start, len);
			if (ret) {
				btrfs_end_transaction(trans, root);
				goto out;
			}
		}
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= btrfs_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}

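/*
 * Returns 1 if the extent item at @slot is an existing hole (a regular file
 * extent with disk_bytenr == 0) immediately adjacent to [start, end), so
 * the hole being punched can simply be merged into it.
 */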
2044 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
2045 int slot, u64 start, u64 end)
2047 struct btrfs_file_extent_item *fi;
2048 struct btrfs_key key;
2050 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2051 return 0;
2053 btrfs_item_key_to_cpu(leaf, &key, slot);
2054 if (key.objectid != btrfs_ino(inode) ||
2055 key.type != BTRFS_EXTENT_DATA_KEY)
2056 return 0;
2058 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2060 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2061 return 0;
2063 if (btrfs_file_extent_disk_bytenr(leaf, fi))
2064 return 0;
2066 if (key.offset == end)
2067 return 1;
2068 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2069 return 1;
2070 return 0;
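/*
 * Illustrative note (not from the original source): the helper above only
 * treats an item as mergeable when it is a regular extent with a zero
 * disk_bytenr (an explicit hole item) that either starts at 'end' or
 * ends at 'start', i.e. it touches the new hole on exactly one side.
 */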
2073 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
2074 struct btrfs_path *path, u64 offset, u64 end)
2076 struct btrfs_root *root = BTRFS_I(inode)->root;
2077 struct extent_buffer *leaf;
2078 struct btrfs_file_extent_item *fi;
2079 struct extent_map *hole_em;
2080 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2081 struct btrfs_key key;
2082 int ret;
2084 if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
2085 goto out;
2087 key.objectid = btrfs_ino(inode);
2088 key.type = BTRFS_EXTENT_DATA_KEY;
2089 key.offset = offset;
2091 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2092 if (ret < 0)
2093 return ret;
2094 BUG_ON(!ret);
2096 leaf = path->nodes[0];
2097 if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
2098 u64 num_bytes;
2100 path->slots[0]--;
2101 fi = btrfs_item_ptr(leaf, path->slots[0],
2102 struct btrfs_file_extent_item);
2103 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2104 end - offset;
2105 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2106 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2107 btrfs_set_file_extent_offset(leaf, fi, 0);
2108 btrfs_mark_buffer_dirty(leaf);
2109 goto out;
2112 if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
2113 u64 num_bytes;
2115 path->slots[0]++;
2116 key.offset = offset;
2117 btrfs_set_item_key_safe(root, path, &key);
2118 fi = btrfs_item_ptr(leaf, path->slots[0],
2119 struct btrfs_file_extent_item);
2120 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2121 offset;
2122 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2123 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2124 btrfs_set_file_extent_offset(leaf, fi, 0);
2125 btrfs_mark_buffer_dirty(leaf);
2126 goto out;
2128 btrfs_release_path(path);
2130 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
2131 0, 0, end - offset, 0, end - offset,
2132 0, 0, 0);
2133 if (ret)
2134 return ret;
2136 out:
2137 btrfs_release_path(path);
2139 hole_em = alloc_extent_map();
2140 if (!hole_em) {
2141 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2142 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2143 &BTRFS_I(inode)->runtime_flags);
2144 } else {
2145 hole_em->start = offset;
2146 hole_em->len = end - offset;
2147 hole_em->ram_bytes = hole_em->len;
2148 hole_em->orig_start = offset;
2150 hole_em->block_start = EXTENT_MAP_HOLE;
2151 hole_em->block_len = 0;
2152 hole_em->orig_block_len = 0;
2153 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
2154 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2155 hole_em->generation = trans->transid;
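/*
 * Illustrative note (not part of the original source):
 * add_extent_mapping() below returns -EEXIST while a cached extent
 * still overlaps [offset, end), so the loop keeps dropping the cached
 * range until the new hole mapping can be inserted.
 */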
2157 do {
2158 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2159 write_lock(&em_tree->lock);
2160 ret = add_extent_mapping(em_tree, hole_em, 1);
2161 write_unlock(&em_tree->lock);
2162 } while (ret == -EEXIST);
2163 free_extent_map(hole_em);
2164 if (ret)
2165 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2166 &BTRFS_I(inode)->runtime_flags);
2169 return 0;
2173 * Find a hole extent on the given inode and change start/len to the end
2174 * of the hole extent (a hole/vacuum extent whose em->start <= start &&
2175 * em->start + em->len > start).
2176 * When a hole extent is found, return 1 and modify start/len.
2178 static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2180 struct extent_map *em;
2181 int ret = 0;
2183 em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
2184 if (IS_ERR_OR_NULL(em)) {
2185 if (!em)
2186 ret = -ENOMEM;
2187 else
2188 ret = PTR_ERR(em);
2189 return ret;
2192 /* Hole or vacuum extent (only exists in no-holes mode) */
2193 if (em->block_start == EXTENT_MAP_HOLE) {
2194 ret = 1;
2195 *len = em->start + em->len > *start + *len ?
2196 0 : *start + *len - em->start - em->len;
2197 *start = em->start + em->len;
2199 free_extent_map(em);
2200 return ret;
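/*
 * Illustrative worked example (not from the original source), assuming
 * 4K sectors: with data at [0, 4K) and a hole at [4K, 8K), a call with
 * *start = 6K and *len = 1K finds the hole mapping (em->start = 4K,
 * em->len = 4K). Since em->start + em->len = 8K > 6K + 1K = 7K, the
 * function returns 1 with *start = 8K and *len = 0.
 */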
2203 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2205 struct btrfs_root *root = BTRFS_I(inode)->root;
2206 struct extent_state *cached_state = NULL;
2207 struct btrfs_path *path;
2208 struct btrfs_block_rsv *rsv;
2209 struct btrfs_trans_handle *trans;
2210 u64 lockstart;
2211 u64 lockend;
2212 u64 tail_start;
2213 u64 tail_len;
2214 u64 orig_start = offset;
2215 u64 cur_offset;
2216 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
2217 u64 drop_end;
2218 int ret = 0;
2219 int err = 0;
2220 int rsv_count;
2221 bool same_page;
2222 bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
2223 u64 ino_size;
2225 ret = btrfs_wait_ordered_range(inode, offset, len);
2226 if (ret)
2227 return ret;
2229 mutex_lock(&inode->i_mutex);
2230 ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
2231 ret = find_first_non_hole(inode, &offset, &len);
2232 if (ret < 0)
2233 goto out_only_mutex;
2234 if (ret && !len) {
2235 /* Already in a large hole */
2236 ret = 0;
2237 goto out_only_mutex;
2240 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
2241 lockend = round_down(offset + len,
2242 BTRFS_I(inode)->root->sectorsize) - 1;
2243 same_page = ((offset >> PAGE_CACHE_SHIFT) ==
2244 ((offset + len - 1) >> PAGE_CACHE_SHIFT));
2247 * We needn't truncate any page which is beyond the end of the file
2248 * because we are sure there is no data there.
2251 * Only do this if we are in the same page and we aren't doing the
2252 * entire page.
2254 if (same_page && len < PAGE_CACHE_SIZE) {
2255 if (offset < ino_size)
2256 ret = btrfs_truncate_page(inode, offset, len, 0);
2257 goto out_only_mutex;
2260 /* zero back part of the first page */
2261 if (offset < ino_size) {
2262 ret = btrfs_truncate_page(inode, offset, 0, 0);
2263 if (ret) {
2264 mutex_unlock(&inode->i_mutex);
2265 return ret;
2269 /* Check the aligned pages after the first unaligned page; if
2270 * offset != orig_start, the first unaligned page and several
2271 * following pages are already holes, so the extra check can
2272 * be skipped */
2273 if (offset == orig_start) {
2274 /* after truncate page, check hole again */
2275 len = offset + len - lockstart;
2276 offset = lockstart;
2277 ret = find_first_non_hole(inode, &offset, &len);
2278 if (ret < 0)
2279 goto out_only_mutex;
2280 if (ret && !len) {
2281 ret = 0;
2282 goto out_only_mutex;
2284 lockstart = offset;
2287 /* Check whether the tail unaligned part is in a hole */
2288 tail_start = lockend + 1;
2289 tail_len = offset + len - tail_start;
2290 if (tail_len) {
2291 ret = find_first_non_hole(inode, &tail_start, &tail_len);
2292 if (unlikely(ret < 0))
2293 goto out_only_mutex;
2294 if (!ret) {
2295 /* zero the front end of the last page */
2296 if (tail_start + tail_len < ino_size) {
2297 ret = btrfs_truncate_page(inode,
2298 tail_start + tail_len, 0, 1);
2299 if (ret)
2300 goto out_only_mutex;
2305 if (lockend < lockstart) {
2306 mutex_unlock(&inode->i_mutex);
2307 return 0;
2310 while (1) {
2311 struct btrfs_ordered_extent *ordered;
2313 truncate_pagecache_range(inode, lockstart, lockend);
2315 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2316 0, &cached_state);
2317 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2320 * We need to make sure we have no ordered extents in this range
2321 * and that nobody raced in and read a page in this range; if
2322 * either happened, we need to try again.
2324 if ((!ordered ||
2325 (ordered->file_offset + ordered->len <= lockstart ||
2326 ordered->file_offset > lockend)) &&
2327 !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
2328 if (ordered)
2329 btrfs_put_ordered_extent(ordered);
2330 break;
2332 if (ordered)
2333 btrfs_put_ordered_extent(ordered);
2334 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2335 lockend, &cached_state, GFP_NOFS);
2336 ret = btrfs_wait_ordered_range(inode, lockstart,
2337 lockend - lockstart + 1);
2338 if (ret) {
2339 mutex_unlock(&inode->i_mutex);
2340 return ret;
2344 path = btrfs_alloc_path();
2345 if (!path) {
2346 ret = -ENOMEM;
2347 goto out;
2350 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2351 if (!rsv) {
2352 ret = -ENOMEM;
2353 goto out_free;
2355 rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
2356 rsv->failfast = 1;
2359 * 1 - update the inode
2360 * 1 - removing the extents in the range
2361 * 1 - adding the hole extent if no_holes isn't set
2363 rsv_count = no_holes ? 2 : 3;
2364 trans = btrfs_start_transaction(root, rsv_count);
2365 if (IS_ERR(trans)) {
2366 err = PTR_ERR(trans);
2367 goto out_free;
2370 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2371 min_size);
2372 BUG_ON(ret);
2373 trans->block_rsv = rsv;
2375 cur_offset = lockstart;
2376 len = lockend - cur_offset;
2377 while (cur_offset < lockend) {
2378 ret = __btrfs_drop_extents(trans, root, inode, path,
2379 cur_offset, lockend + 1,
2380 &drop_end, 1, 0, 0, NULL);
2381 if (ret != -ENOSPC)
2382 break;
2384 trans->block_rsv = &root->fs_info->trans_block_rsv;
2386 if (cur_offset < ino_size) {
2387 ret = fill_holes(trans, inode, path, cur_offset,
2388 drop_end);
2389 if (ret) {
2390 err = ret;
2391 break;
2395 cur_offset = drop_end;
2397 ret = btrfs_update_inode(trans, root, inode);
2398 if (ret) {
2399 err = ret;
2400 break;
2403 btrfs_end_transaction(trans, root);
2404 btrfs_btree_balance_dirty(root);
2406 trans = btrfs_start_transaction(root, rsv_count);
2407 if (IS_ERR(trans)) {
2408 ret = PTR_ERR(trans);
2409 trans = NULL;
2410 break;
2413 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2414 rsv, min_size);
2415 BUG_ON(ret); /* shouldn't happen */
2416 trans->block_rsv = rsv;
2418 ret = find_first_non_hole(inode, &cur_offset, &len);
2419 if (unlikely(ret < 0))
2420 break;
2421 if (ret && !len) {
2422 ret = 0;
2423 break;
2427 if (ret) {
2428 err = ret;
2429 goto out_trans;
2432 trans->block_rsv = &root->fs_info->trans_block_rsv;
2434 * Don't insert a file hole extent item if it's for a range beyond eof
2435 * (because it's useless) or if it represents a 0-byte range (when
2436 * cur_offset == drop_end).
2438 if (cur_offset < ino_size && cur_offset < drop_end) {
2439 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2440 if (ret) {
2441 err = ret;
2442 goto out_trans;
2446 out_trans:
2447 if (!trans)
2448 goto out_free;
2450 inode_inc_iversion(inode);
2451 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2453 trans->block_rsv = &root->fs_info->trans_block_rsv;
2454 ret = btrfs_update_inode(trans, root, inode);
2455 btrfs_end_transaction(trans, root);
2456 btrfs_btree_balance_dirty(root);
2457 out_free:
2458 btrfs_free_path(path);
2459 btrfs_free_block_rsv(root, rsv);
2460 out:
2461 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2462 &cached_state, GFP_NOFS);
2463 out_only_mutex:
2464 mutex_unlock(&inode->i_mutex);
2465 if (ret && !err)
2466 err = ret;
2467 return err;
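
/*
 * Illustrative userspace sketch (not part of this file): hole punching
 * as implemented by btrfs_punch_hole() above is reached through
 * fallocate(2) with FALLOC_FL_PUNCH_HOLE, which must be combined with
 * FALLOC_FL_KEEP_SIZE. Offsets need not be block aligned; the partial
 * pages at the edges are zeroed rather than deallocated.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Drop 64K of space starting at offset 4K; file size is unchanged. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 65536)) {
		perror("fallocate");
		return 1;
	}
	close(fd);
	return 0;
}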
2470 static long btrfs_fallocate(struct file *file, int mode,
2471 loff_t offset, loff_t len)
2473 struct inode *inode = file_inode(file);
2474 struct extent_state *cached_state = NULL;
2475 struct btrfs_root *root = BTRFS_I(inode)->root;
2476 u64 cur_offset;
2477 u64 last_byte;
2478 u64 alloc_start;
2479 u64 alloc_end;
2480 u64 alloc_hint = 0;
2481 u64 locked_end;
2482 struct extent_map *em;
2483 int blocksize = BTRFS_I(inode)->root->sectorsize;
2484 int ret;
2486 alloc_start = round_down(offset, blocksize);
2487 alloc_end = round_up(offset + len, blocksize);
2489 /* Make sure we aren't being given some crap mode */
2490 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2491 return -EOPNOTSUPP;
2493 if (mode & FALLOC_FL_PUNCH_HOLE)
2494 return btrfs_punch_hole(inode, offset, len);
2497 * Make sure we have enough space before we do the
2498 * allocation.
2500 ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
2501 if (ret)
2502 return ret;
2503 if (root->fs_info->quota_enabled) {
2504 ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
2505 if (ret)
2506 goto out_reserve_fail;
2509 mutex_lock(&inode->i_mutex);
2510 ret = inode_newsize_ok(inode, alloc_end);
2511 if (ret)
2512 goto out;
2514 if (alloc_start > inode->i_size) {
2515 ret = btrfs_cont_expand(inode, i_size_read(inode),
2516 alloc_start);
2517 if (ret)
2518 goto out;
2519 } else {
2521 * If we are fallocating from the end of the file onward, we
2522 * need to zero out the end of the page if i_size lands in the
2523 * middle of a page.
2525 ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
2526 if (ret)
2527 goto out;
2531 * wait for ordered IO before we have any locks. We'll loop again
2532 * below with the locks held.
2534 ret = btrfs_wait_ordered_range(inode, alloc_start,
2535 alloc_end - alloc_start);
2536 if (ret)
2537 goto out;
2539 locked_end = alloc_end - 1;
2540 while (1) {
2541 struct btrfs_ordered_extent *ordered;
2543 /* the extent lock is ordered inside the running
2544 * transaction
2546 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2547 locked_end, 0, &cached_state);
2548 ordered = btrfs_lookup_first_ordered_extent(inode,
2549 alloc_end - 1);
2550 if (ordered &&
2551 ordered->file_offset + ordered->len > alloc_start &&
2552 ordered->file_offset < alloc_end) {
2553 btrfs_put_ordered_extent(ordered);
2554 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2555 alloc_start, locked_end,
2556 &cached_state, GFP_NOFS);
2558 * we can't wait on the range with the transaction
2559 * running or with the extent lock held
2561 ret = btrfs_wait_ordered_range(inode, alloc_start,
2562 alloc_end - alloc_start);
2563 if (ret)
2564 goto out;
2565 } else {
2566 if (ordered)
2567 btrfs_put_ordered_extent(ordered);
2568 break;
2572 cur_offset = alloc_start;
2573 while (1) {
2574 u64 actual_end;
2576 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2577 alloc_end - cur_offset, 0);
2578 if (IS_ERR_OR_NULL(em)) {
2579 if (!em)
2580 ret = -ENOMEM;
2581 else
2582 ret = PTR_ERR(em);
2583 break;
2585 last_byte = min(extent_map_end(em), alloc_end);
2586 actual_end = min_t(u64, extent_map_end(em), offset + len);
2587 last_byte = ALIGN(last_byte, blocksize);
2589 if (em->block_start == EXTENT_MAP_HOLE ||
2590 (cur_offset >= inode->i_size &&
2591 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2592 ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2593 last_byte - cur_offset,
2594 1 << inode->i_blkbits,
2595 offset + len,
2596 &alloc_hint);
2598 if (ret < 0) {
2599 free_extent_map(em);
2600 break;
2602 } else if (actual_end > inode->i_size &&
2603 !(mode & FALLOC_FL_KEEP_SIZE)) {
2605 * We didn't need to allocate any more space, but we
2606 * still extended the size of the file so we need to
2607 * update i_size.
2609 inode->i_ctime = CURRENT_TIME;
2610 i_size_write(inode, actual_end);
2611 btrfs_ordered_update_i_size(inode, actual_end, NULL);
2613 free_extent_map(em);
2615 cur_offset = last_byte;
2616 if (cur_offset >= alloc_end) {
2617 ret = 0;
2618 break;
2621 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2622 &cached_state, GFP_NOFS);
2623 out:
2624 mutex_unlock(&inode->i_mutex);
2625 if (root->fs_info->quota_enabled)
2626 btrfs_qgroup_free(root, alloc_end - alloc_start);
2627 out_reserve_fail:
2628 /* Let go of our reservation. */
2629 btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
2630 return ret;
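
/*
 * Illustrative userspace sketch (not part of this file): plain
 * preallocation through btrfs_fallocate() above. mode 0 allocates and
 * extends i_size; adding FALLOC_FL_KEEP_SIZE would preallocate past EOF
 * without changing the visible size. Name and length are arbitrary.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_WRONLY | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Reserve 1MiB up front; btrfs records it as preallocated extents. */
	if (fallocate(fd, 0, 0, 1 << 20)) {
		perror("fallocate");
		return 1;
	}
	close(fd);
	return 0;
}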
2633 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2635 struct btrfs_root *root = BTRFS_I(inode)->root;
2636 struct extent_map *em = NULL;
2637 struct extent_state *cached_state = NULL;
2638 u64 lockstart;
2639 u64 lockend;
2640 u64 start;
2641 u64 len;
2642 int ret = 0;
2644 if (inode->i_size == 0)
2645 return -ENXIO;
2648 * *offset can be negative; in this case we start finding DATA/HOLE from
2649 * the very start of the file.
2651 start = max_t(loff_t, 0, *offset);
2653 lockstart = round_down(start, root->sectorsize);
2654 lockend = round_up(i_size_read(inode), root->sectorsize);
2655 if (lockend <= lockstart)
2656 lockend = lockstart + root->sectorsize;
2657 lockend--;
2658 len = lockend - lockstart + 1;
2660 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2661 &cached_state);
2663 while (start < inode->i_size) {
2664 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2665 if (IS_ERR(em)) {
2666 ret = PTR_ERR(em);
2667 em = NULL;
2668 break;
2671 if (whence == SEEK_HOLE &&
2672 (em->block_start == EXTENT_MAP_HOLE ||
2673 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2674 break;
2675 else if (whence == SEEK_DATA &&
2676 (em->block_start != EXTENT_MAP_HOLE &&
2677 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2678 break;
2680 start = em->start + em->len;
2681 free_extent_map(em);
2682 em = NULL;
2683 cond_resched();
2685 free_extent_map(em);
2686 if (!ret) {
2687 if (whence == SEEK_DATA && start >= inode->i_size)
2688 ret = -ENXIO;
2689 else
2690 *offset = min_t(loff_t, start, inode->i_size);
2692 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2693 &cached_state, GFP_NOFS);
2694 return ret;
2697 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2699 struct inode *inode = file->f_mapping->host;
2700 int ret;
2702 mutex_lock(&inode->i_mutex);
2703 switch (whence) {
2704 case SEEK_END:
2705 case SEEK_CUR:
2706 offset = generic_file_llseek(file, offset, whence);
2707 goto out;
2708 case SEEK_DATA:
2709 case SEEK_HOLE:
2710 if (offset >= i_size_read(inode)) {
2711 mutex_unlock(&inode->i_mutex);
2712 return -ENXIO;
2715 ret = find_desired_extent(inode, &offset, whence);
2716 if (ret) {
2717 mutex_unlock(&inode->i_mutex);
2718 return ret;
2722 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2723 out:
2724 mutex_unlock(&inode->i_mutex);
2725 return offset;
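
/*
 * Illustrative userspace sketch (not part of this file): walking a
 * file's data segments with the SEEK_DATA/SEEK_HOLE support provided by
 * btrfs_file_llseek() and find_desired_extent() above.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data = 0, hole;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* lseek() fails with ENXIO once no further data exists. */
	while ((data = lseek(fd, data, SEEK_DATA)) != (off_t)-1) {
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}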
2728 const struct file_operations btrfs_file_operations = {
2729 .llseek = btrfs_file_llseek,
2730 .read = new_sync_read,
2731 .write = new_sync_write,
2732 .read_iter = generic_file_read_iter,
2733 .splice_read = generic_file_splice_read,
2734 .write_iter = btrfs_file_write_iter,
2735 .mmap = btrfs_file_mmap,
2736 .open = generic_file_open,
2737 .release = btrfs_release_file,
2738 .fsync = btrfs_sync_file,
2739 .fallocate = btrfs_fallocate,
2740 .unlocked_ioctl = btrfs_ioctl,
2741 #ifdef CONFIG_COMPAT
2742 .compat_ioctl = btrfs_ioctl,
2743 #endif
2746 void btrfs_auto_defrag_exit(void)
2748 if (btrfs_inode_defrag_cachep)
2749 kmem_cache_destroy(btrfs_inode_defrag_cachep);
2752 int btrfs_auto_defrag_init(void)
2754 btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2755 sizeof(struct inode_defrag), 0,
2756 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2757 NULL);
2758 if (!btrfs_inode_defrag_cachep)
2759 return -ENOMEM;
2761 return 0;
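
/*
 * Illustrative kernel-side sketch (not part of this file): the usual
 * lifetime of objects carved from a slab cache like the one created
 * above. The helper names are hypothetical and GFP_NOFS is an
 * assumption; real callers pick whatever context allows.
 */
static struct inode_defrag *example_alloc_defrag(void)
{
	/* Zeroed allocation from the dedicated slab cache. */
	return kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
}

static void example_free_defrag(struct inode_defrag *defrag)
{
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}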