/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "ref-cache.h"
#include "compression.h"
#include "locking.h"

struct btrfs_iget_args {
        u64 ino;
        struct btrfs_root *root;
};

static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;
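
/*
 * Map the S_IF* file-type bits of i_mode to btrfs directory-entry types.
 * S_IFMT occupies the top nibble of the mode, so shifting right by
 * S_SHIFT (12) yields a small dense index: e.g. S_IFDIR is 0040000, and
 * 0040000 >> 12 == 4, so btrfs_type_by_mode[4] == BTRFS_FT_DIR.
 */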
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]  = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]  = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]  = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]  = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]  = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]  = BTRFS_FT_SYMLINK,
};

static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);

static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
{
        int err;

        err = btrfs_init_acl(inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(inode, dir);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                struct page **compressed_pages)
{
        struct btrfs_key key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        size_t datasize;
        unsigned long offset;
        int use_compress = 0;

        if (compressed_size && compressed_pages) {
                use_compress = 1;
                cur_size = compressed_size;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;
        btrfs_set_trans_block_group(trans, inode);

        key.objectid = inode->i_ino;
        key.offset = start;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        datasize = btrfs_file_extent_calc_inline_size(cur_size);

        inode_add_bytes(inode, size);
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        BUG_ON(ret);
        if (ret) {
                err = ret;
                goto fail;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (use_compress) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                         PAGE_CACHE_SIZE);

                        kaddr = kmap_atomic(cpage, KM_USER0);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr, KM_USER0);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  BTRFS_COMPRESS_ZLIB);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page, KM_USER0);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr, KM_USER0);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);

        BTRFS_I(inode)->disk_i_size = inode->i_size;
        btrfs_update_inode(trans, root, inode);
        return 0;
fail:
        btrfs_free_path(path);
        return err;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct inode *inode, u64 start, u64 end,
                                          size_t compressed_size,
                                          struct page **compressed_pages)
{
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = (end + root->sectorsize - 1) &
                        ~((u64)root->sectorsize - 1);
        u64 hint_byte;
        u64 data_len = inline_len;
        int ret;

        if (compressed_size)
                data_len = compressed_size;
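
        /*
         * An inline extent is only possible when it is the very first
         * extent of the file (start == 0), fits within a single page and
         * within the per-leaf inline limit (and the max_inline mount
         * option), does not end exactly on a sector boundary while
         * uncompressed, and reaches the end of the file.  Anything else
         * returns 1 so the caller falls back to the normal extent path.
         */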
        if (start > 0 ||
            actual_end >= PAGE_CACHE_SIZE ||
            data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > root->fs_info->max_inline) {
                return 1;
        }

        ret = btrfs_drop_extents(trans, root, inode, start,
                                 aligned_end, start, &hint_byte);
        BUG_ON(ret);

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, root, inode, start,
                                   inline_len, compressed_size,
                                   compressed_pages);
        BUG_ON(ret);
        btrfs_drop_extent_cache(inode, start, aligned_end, 0);
        return 0;
}

struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        struct list_head list;
};

struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 num_bytes;
        u64 orig_start;
        u64 disk_num_bytes;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        unsigned long max_compressed = 128 * 1024;
        unsigned long max_uncompressed = 128 * 1024;
        int i;
        int will_compress;

        orig_start = start;

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
        nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /* we want to make sure that amount of ram required to uncompress
         * an extent is reasonable, so we limit the total size in ram
         * of a compressed extent to 128k.  This is a crucial number
         * because it also controls how easily we can spread reads across
         * cpus for decompression.
         *
         * We also want to make sure the amount of IO required to do
         * a random read is reasonably small, so we limit the size of
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
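
        /*
         * Round the range length up to a blocksize boundary: with a 4K
         * blocksize, (end - start + 4096) & ~4095 rounds the inclusive
         * length (end - start + 1) up to the next multiple of 4096,
         * e.g. a 5000 byte range becomes 8192 bytes.
         */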
        num_bytes = (end - start + blocksize) & ~(blocksize - 1);
        num_bytes = max(blocksize, num_bytes);
        disk_num_bytes = num_bytes;
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (!btrfs_test_flag(inode, NOCOMPRESS) &&
            btrfs_test_opt(root, COMPRESS)) {
                WARN_ON(pages);
                pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

                ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
                                                total_compressed, pages,
                                                nr_pages, &nr_pages_ret,
                                                &total_in,
                                                &total_compressed,
                                                max_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_CACHE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page, KM_USER0);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
                                kunmap_atomic(kaddr, KM_USER0);
                        }
                        will_compress = 1;
                }
        }
        if (start == 0) {
                trans = btrfs_join_transaction(root, 1);
                BUG_ON(!trans);
                btrfs_set_trans_block_group(trans, inode);

                /* let's try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(trans, root, inode,
                                                    start, end, 0, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(trans, root, inode,
                                                    start, end,
                                                    total_compressed, pages);
                }
                btrfs_end_transaction(trans, root);
                if (ret == 0) {
                        /*
                         * inline extent creation worked, we don't need
                         * to create any more async work items.  Unlock
                         * and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode,
                                                     &BTRFS_I(inode)->io_tree,
                                                     start, end, NULL, 1, 0,
                                                     0, 1, 1, 1);
                        ret = 0;
                        goto free_pages_out;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent, so round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = (total_compressed + blocksize - 1) &
                        ~(blocksize - 1);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
                total_in = (total_in + PAGE_CACHE_SIZE - 1) &
                        ~(PAGE_CACHE_SIZE - 1);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        disk_num_bytes = total_compressed;
                        num_bytes = total_in;
                }
        }
        if (!will_compress && pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
                        page_cache_release(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages_ret = 0;

                /* flag the file so we don't compress in the future */
                btrfs_set_flag(inode, NOCOMPRESS);
        }
        if (will_compress) {
                *num_added += 1;

                /* the async work queues will take care of doing actual
                 * allocation on disk for these compressed pages,
                 * and will submit them to the elevator.
                 */
                add_async_extent(async_cow, start, num_bytes,
                                 total_compressed, pages, nr_pages_ret);

                if (start + num_bytes < end && start + num_bytes < actual_end) {
                        start += num_bytes;
                        pages = NULL;
                        cond_resched();
                        goto again;
                }
        } else {
cleanup_and_bail_uncompressed:
                /*
                 * No compression, but we still need to write the pages in
                 * the file we've been given so far.  redirty the locked
                 * page if it corresponds to our extent and set things up
                 * for the async work queue to run cow_file_range to do
                 * the normal delalloc dance
                 */
                if (page_offset(locked_page) >= start &&
                    page_offset(locked_page) <= end) {
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
                add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
                *num_added += 1;
        }
out:
        return 0;

free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
                page_cache_release(pages[i]);
        }
        kfree(pages);

        goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_trans_handle *trans;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
        int ret;

        if (list_empty(&async_cow->extents))
                return 0;

        trans = btrfs_join_transaction(root, 1);

        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                    async_extent->start +
                                    async_extent->ram_size - 1, GFP_NOFS);

                        /* allocate blocks */
                        cow_file_range(inode, async_cow->locked_page,
                                       async_extent->start,
                                       async_extent->start +
                                       async_extent->ram_size - 1,
                                       &page_started, &nr_written, 0);

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started)
                                extent_write_locked_range(io_tree,
                                                inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1,
                                                btrfs_get_extent,
                                                WB_SYNC_ALL);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1,
                            GFP_NOFS);
                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                btrfs_drop_extent_cache(inode, async_extent->start,
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);

                ret = btrfs_reserve_extent(trans, root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint,
                                           (u64)-1, &ins, 1);
                BUG_ON(ret);
                em = alloc_extent_map(GFP_NOFS);
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

                while (1) {
                        spin_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em);
                        spin_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                }

                ret = btrfs_add_ordered_extent(inode, async_extent->start,
                                               ins.objectid,
                                               async_extent->ram_size,
                                               ins.offset,
                                               BTRFS_ORDERED_COMPRESSED);
                BUG_ON(ret);

                btrfs_end_transaction(trans, root);

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode,
                                             &BTRFS_I(inode)->io_tree,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             NULL, 1, 1, 0, 1, 1, 0);

                ret = btrfs_submit_compressed_write(inode,
                                            async_extent->start,
                                            async_extent->ram_size,
                                            ins.objectid,
                                            ins.offset, async_extent->pages,
                                            async_extent->nr_pages);

                BUG_ON(ret);
                trans = btrfs_join_transaction(root, 1);
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }

        btrfs_end_transaction(trans, root);
        return 0;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written,
                                   int unlock)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;

        trans = btrfs_join_transaction(root, 1);
        BUG_ON(!trans);
        btrfs_set_trans_block_group(trans, inode);

        actual_end = min_t(u64, isize, end + 1);

        num_bytes = (end - start + blocksize) & ~(blocksize - 1);
        num_bytes = max(blocksize, num_bytes);
        disk_num_bytes = num_bytes;
        ret = 0;

        if (start == 0) {
                /* let's try to make an inline extent */
                ret = cow_file_range_inline(trans, root, inode,
                                            start, end, 0, NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode,
                                                     &BTRFS_I(inode)->io_tree,
                                                     start, end, NULL, 1, 1,
                                                     1, 1, 1, 1);
                        *nr_written = *nr_written +
                                (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                        *page_started = 1;
                        ret = 0;
                        goto out;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(&root->fs_info->super_copy));

        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
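
        /*
         * Allocate in chunks of at most fs_info->max_extent bytes: each
         * pass reserves an extent, inserts a pinned mapping for it and
         * queues an ordered extent, then advances by however much the
         * allocator actually returned (ins.offset).
         */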
        while (disk_num_bytes > 0) {
                cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
                ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           (u64)-1, &ins, 1);
                BUG_ON(ret);

                em = alloc_extent_map(GFP_NOFS);
                em->start = start;
                em->orig_start = em->start;

                ram_size = ins.offset;
                em->len = ins.offset;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);

                while (1) {
                        spin_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em);
                        spin_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, start,
                                                start + ram_size - 1, 0);
                }

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                BUG_ON(ret);

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        BUG_ON(ret);
                }

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 */
                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                             start, start + ram_size - 1,
                                             locked_page, unlock, 1,
                                             1, 0, 0, 0);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        ret = 0;
        btrfs_end_transaction(trans, root);

        return ret;
}

/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0)
                async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;

        atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
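
        /*
         * Writers throttle themselves against async_delalloc_pages in
         * cow_file_range_async; now that this chunk has been submitted,
         * wake them if the backlog of queued pages has dropped below the
         * threshold checked below.
         */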
        if (atomic_read(&root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
        int limit = 10 * 1024 * 1024;

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
                         EXTENT_DELALLOC, 1, 0, GFP_NOFS);
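
        /*
         * Carve the range into 512K chunks, one async_cow work item per
         * chunk, so compression can be spread across CPUs.  If the inode
         * is flagged NOCOMPRESS, a single chunk covers the whole range.
         */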
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                async_cow->inode = inode;
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (btrfs_test_flag(inode, NOCOMPRESS))
                        cur_end = end;
                else
                        cur_end = min(end, start + 512 * 1024 - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                async_cow->work.func = async_cow_start;
                async_cow->work.ordered_func = async_cow_submit;
                async_cow->work.ordered_free = async_cow_free;
                async_cow->work.flags = 0;

                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

                btrfs_queue_worker(&root->fs_info->delalloc_workers,
                                   &async_cow->work);

                if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
                        wait_event(root->fs_info->async_submit_wait,
                           (atomic_read(&root->fs_info->async_delalloc_pages) <
                            limit));
                }

                while (atomic_read(&root->fs_info->async_submit_draining) &&
                       atomic_read(&root->fs_info->async_delalloc_pages)) {
                        wait_event(root->fs_info->async_submit_wait,
                          (atomic_read(&root->fs_info->async_delalloc_pages) ==
                           0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}
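
/*
 * Returns 1 if any checksum items exist for the byte range
 * [bytenr, bytenr + num_bytes), 0 otherwise.  run_delalloc_nocow uses this
 * to force COW where checksums already cover an extent, so a given extent
 * never ends up with a mix of csummed and un-csummed data.
 */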
static noinline int csum_exist_in_range(struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        return 1;
}

/*
 * called back when writing out a nocow range.  This checks for snapshots
 * or COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
                                       struct page *locked_page,
                                       u64 start, u64 end, int *page_started, int force,
                                       unsigned long *nr_written)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key found_key;
        u64 cow_start;
        u64 cur_offset;
        u64 extent_end;
        u64 disk_bytenr;
        u64 num_bytes;
        int extent_type;
        int ret;
        int type;
        int nocow;
        int check_prev = 1;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        trans = btrfs_join_transaction(root, 1);
        BUG_ON(!trans);

        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               cur_offset, 0);
                BUG_ON(ret < 0);
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
                        if (found_key.objectid == inode->i_ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                check_prev = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                BUG_ON(1);
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                nocow = 0;
                disk_bytenr = 0;
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid > inode->i_ino ||
                    found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;

                if (found_key.offset > cur_offset) {
                        extent_end = found_key.offset;
                        goto out_check;
                }

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        extent_end = found_key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                        if (extent_end <= start) {
                                path->slots[0]++;
                                goto next_slot;
                        }
                        if (disk_bytenr == 0)
                                goto out_check;
                        if (btrfs_file_extent_compression(leaf, fi) ||
                            btrfs_file_extent_encryption(leaf, fi) ||
                            btrfs_file_extent_other_encoding(leaf, fi))
                                goto out_check;
                        if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
                                goto out_check;
                        if (btrfs_extent_readonly(root, disk_bytenr))
                                goto out_check;
                        if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
                                                  disk_bytenr))
                                goto out_check;
                        disk_bytenr += btrfs_file_extent_offset(leaf, fi);
                        disk_bytenr += cur_offset - found_key.offset;
                        num_bytes = min(end + 1, extent_end) - cur_offset;
                        /*
                         * force cow if csum exists in the range.
                         * this ensures that csums for a given extent are
                         * either valid or do not exist.
                         */
                        if (csum_exist_in_range(root, disk_bytenr, num_bytes))
                                goto out_check;
                        nocow = 1;
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = found_key.offset +
                                btrfs_file_extent_inline_len(leaf, fi);
                        extent_end = ALIGN(extent_end, root->sectorsize);
                } else {
                        BUG_ON(1);
                }
out_check:
                if (extent_end <= start) {
                        path->slots[0]++;
                        goto next_slot;
                }
                if (!nocow) {
                        if (cow_start == (u64)-1)
                                cow_start = cur_offset;
                        cur_offset = extent_end;
                        if (cur_offset > end)
                                break;
                        path->slots[0]++;
                        goto next_slot;
                }

                btrfs_release_path(root, path);
                if (cow_start != (u64)-1) {
                        ret = cow_file_range(inode, locked_page, cow_start,
                                             found_key.offset - 1, page_started,
                                             nr_written, 1);
                        BUG_ON(ret);
                        cow_start = (u64)-1;
                }

                if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        struct extent_map *em;
                        struct extent_map_tree *em_tree;
                        em_tree = &BTRFS_I(inode)->extent_tree;
                        em = alloc_extent_map(GFP_NOFS);
                        em->start = cur_offset;
                        em->orig_start = em->start;
                        em->len = num_bytes;
                        em->block_len = num_bytes;
                        em->block_start = disk_bytenr;
                        em->bdev = root->fs_info->fs_devices->latest_bdev;
                        set_bit(EXTENT_FLAG_PINNED, &em->flags);
                        while (1) {
                                spin_lock(&em_tree->lock);
                                ret = add_extent_mapping(em_tree, em);
                                spin_unlock(&em_tree->lock);
                                if (ret != -EEXIST) {
                                        free_extent_map(em);
                                        break;
                                }
                                btrfs_drop_extent_cache(inode, em->start,
                                                em->start + em->len - 1, 0);
                        }
                        type = BTRFS_ORDERED_PREALLOC;
                } else {
                        type = BTRFS_ORDERED_NOCOW;
                }

                ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
                                               num_bytes, num_bytes, type);
                BUG_ON(ret);

                extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
                                cur_offset, cur_offset + num_bytes - 1,
                                locked_page, 1, 1, 1, 0, 0, 0);
                cur_offset = extent_end;
                if (cur_offset > end)
                        break;
        }
        btrfs_release_path(root, path);

        if (cur_offset <= end && cow_start == (u64)-1)
                cow_start = cur_offset;
        if (cow_start != (u64)-1) {
                ret = cow_file_range(inode, locked_page, cow_start, end,
                                     page_started, nr_written, 1);
                BUG_ON(ret);
        }

        ret = btrfs_end_transaction(trans, root);
        BUG_ON(ret);
        btrfs_free_path(path);
        return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
                              u64 start, u64 end, int *page_started,
                              unsigned long *nr_written)
{
        int ret;
        struct btrfs_root *root = BTRFS_I(inode)->root;

        if (btrfs_test_flag(inode, NODATACOW))
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 1, nr_written);
        else if (btrfs_test_flag(inode, PREALLOC))
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, 0, nr_written);
        else if (!btrfs_test_opt(root, COMPRESS))
                ret = cow_file_range(inode, locked_page, start, end,
                                     page_started, nr_written, 1);
        else
                ret = cow_file_range_async(inode, locked_page, start, end,
                                           page_started, nr_written);
        return ret;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
                              unsigned long old, unsigned long bits)
{
        /*
         * set_bit and clear bit hooks normally require _irqsave/restore
         * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
        if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
                btrfs_delalloc_reserve_space(root, inode, end - start + 1);
                spin_lock(&root->fs_info->delalloc_lock);
                BTRFS_I(inode)->delalloc_bytes += end - start + 1;
                root->fs_info->delalloc_bytes += end - start + 1;
                if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                        list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
                                      &root->fs_info->delalloc_inodes);
                }
                spin_unlock(&root->fs_info->delalloc_lock);
        }
        return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
                                unsigned long old, unsigned long bits)
{
        /*
         * set_bit and clear bit hooks normally require _irqsave/restore
         * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
        if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;

                spin_lock(&root->fs_info->delalloc_lock);
                if (end - start + 1 > root->fs_info->delalloc_bytes) {
                        printk(KERN_INFO "btrfs warning: delalloc account "
                               "%llu %llu\n",
                               (unsigned long long)end - start + 1,
                               (unsigned long long)
                               root->fs_info->delalloc_bytes);
                        btrfs_delalloc_free_space(root, inode, (u64)-1);
                        root->fs_info->delalloc_bytes = 0;
                        BTRFS_I(inode)->delalloc_bytes = 0;
                } else {
                        btrfs_delalloc_free_space(root, inode,
                                                  end - start + 1);
                        root->fs_info->delalloc_bytes -= end - start + 1;
                        BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
                }
                if (BTRFS_I(inode)->delalloc_bytes == 0 &&
                    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                        list_del_init(&BTRFS_I(inode)->delalloc_inodes);
                }
                spin_unlock(&root->fs_info->delalloc_lock);
        }
        return 0;
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
                         size_t size, struct bio *bio,
                         unsigned long bio_flags)
{
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct btrfs_mapping_tree *map_tree;
        u64 logical = (u64)bio->bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        int ret;

        if (bio_flags & EXTENT_BIO_COMPRESSED)
                return 0;

        length = bio->bi_size;
        map_tree = &root->fs_info->mapping_tree;
        map_length = length;
        ret = btrfs_map_block(map_tree, READ, logical,
                              &map_length, NULL, 0);
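
        /*
         * btrfs_map_block returns in map_length how many bytes from
         * "logical" onward live in the same stripe/chunk.  If adding
         * "size" more bytes would cross that boundary, tell the caller
         * not to merge this page into the bio.
         */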
        if (map_length < length + size)
                return 1;
        return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;

        ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
        BUG_ON(ret);
        return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                                   int mirror_num, unsigned long bio_flags)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        int skip_sum;

        skip_sum = btrfs_test_flag(inode, NODATASUM);

        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
        BUG_ON(ret);
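
        /*
         * Reads either take the compressed-read path or have their
         * expected csums looked up before being mapped; writes (except
         * relocation writes, whose csum items were already cloned) are
         * handed to the async helper threads so checksumming happens at
         * submit time.
         */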
        if (!(rw & (1 << BIO_RW))) {
                if (bio_flags & EXTENT_BIO_COMPRESSED) {
                        return btrfs_submit_compressed_read(inode, bio,
                                                    mirror_num, bio_flags);
                } else if (!skip_sum)
                        btrfs_lookup_bio_sums(root, inode, bio, NULL);
                goto mapit;
        } else if (!skip_sum) {
                /* csum items have already been cloned */
                if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
                        goto mapit;
                /* we're doing a write, do the async checksumming */
                return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num,
                                   bio_flags, __btrfs_submit_bio_start,
                                   __btrfs_submit_bio_done);
        }

mapit:
        return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
                                      struct inode *inode, u64 file_offset,
                                      struct list_head *list)
{
        struct btrfs_ordered_sum *sum;

        btrfs_set_trans_block_group(trans, inode);

        list_for_each_entry(sum, list, list) {
                btrfs_csum_file_blocks(trans,
                       BTRFS_I(inode)->root->fs_info->csum_root, sum);
        }
        return 0;
}
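
/*
 * "end" is an inclusive offset here, so a properly page-aligned delalloc
 * range ends one byte short of a page boundary; warn if it does not.
 */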
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
        if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
                WARN_ON(1);
        return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
                                   GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
        struct page *page;
        struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
        struct btrfs_writepage_fixup *fixup;
        struct btrfs_ordered_extent *ordered;
        struct page *page;
        struct inode *inode;
        u64 page_start;
        u64 page_end;

        fixup = container_of(work, struct btrfs_writepage_fixup, work);
        page = fixup->page;
again:
        lock_page(page);
        if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
                ClearPageChecked(page);
                goto out_page;
        }

        inode = page->mapping->host;
        page_start = page_offset(page);
        page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

        lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

        /* already ordered? We're done */
        if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
                           EXTENT_ORDERED, 0)) {
                goto out;
        }

        ordered = btrfs_lookup_ordered_extent(inode, page_start);
        if (ordered) {
                unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
                              page_end, GFP_NOFS);
                unlock_page(page);
                btrfs_start_ordered_extent(inode, ordered, 1);
                goto again;
        }

        btrfs_set_extent_delalloc(inode, page_start, page_end);
        ClearPageChecked(page);
out:
        unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
        unlock_page(page);
        page_cache_release(page);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
        struct inode *inode = page->mapping->host;
        struct btrfs_writepage_fixup *fixup;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;

        ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
                             EXTENT_ORDERED, 0);
        if (ret)
                return 0;

        if (PageChecked(page))
                return -EAGAIN;

        fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
        if (!fixup)
                return -EAGAIN;

        SetPageChecked(page);
        page_cache_get(page);
        fixup->work.func = btrfs_writepage_fixup_worker;
        fixup->page = page;
        btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
        return -EAGAIN;
}
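
/*
 * Write a file extent item for a freshly allocated (reserved) extent into
 * the btree, after dropping any old extents in the range, and record the
 * allocation in the extent tree.  Called below when an ordered extent for
 * regular data finishes.
 */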
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
                                       struct inode *inode, u64 file_pos,
                                       u64 disk_bytenr, u64 disk_num_bytes,
                                       u64 num_bytes, u64 ram_bytes,
                                       u8 compression, u8 encryption,
                                       u16 other_encoding, int extent_type)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_file_extent_item *fi;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key ins;
        u64 hint;
        int ret;

        path = btrfs_alloc_path();
        BUG_ON(!path);

        path->leave_spinning = 1;
        ret = btrfs_drop_extents(trans, root, inode, file_pos,
                                 file_pos + num_bytes, file_pos, &hint);
        BUG_ON(ret);

        ins.objectid = inode->i_ino;
        ins.offset = file_pos;
        ins.type = BTRFS_EXTENT_DATA_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
        BUG_ON(ret);
        leaf = path->nodes[0];
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, fi, trans->transid);
        btrfs_set_file_extent_type(leaf, fi, extent_type);
        btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
        btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
        btrfs_set_file_extent_offset(leaf, fi, 0);
        btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
        btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
        btrfs_set_file_extent_compression(leaf, fi, compression);
        btrfs_set_file_extent_encryption(leaf, fi, encryption);
        btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

        btrfs_unlock_up_safe(path, 1);
        btrfs_set_lock_blocking(leaf);

        btrfs_mark_buffer_dirty(leaf);

        inode_add_bytes(inode, num_bytes);
        btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

        ins.objectid = disk_bytenr;
        ins.offset = disk_num_bytes;
        ins.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
                                          root->root_key.objectid,
                                          trans->transid, inode->i_ino, &ins);
        BUG_ON(ret);
        btrfs_free_path(path);

        return 0;
}

/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
static noinline void reada_csum(struct btrfs_root *root,
                                struct btrfs_path *path,
                                struct btrfs_ordered_extent *ordered_extent)
{
        struct btrfs_ordered_sum *sum;
        u64 bytenr;

        sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
                         list);
        bytenr = sum->sums[0].bytenr;

        /*
         * we don't care about the results, the point of this search is
         * just to get the btree leaves into ram
         */
        btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
}

/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct btrfs_ordered_extent *ordered_extent = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_path *path;
        int compressed = 0;
        int ret;

        ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
        if (!ret)
                return 0;

        /*
         * before we join the transaction, try to do some of our IO.
         * This will limit the amount of IO that we have to do with
         * the transaction running.  We're unlikely to need to do any
         * IO if the file extents are new, the disk_i_size checks
         * covers the most common case.
         */
        if (start < BTRFS_I(inode)->disk_i_size) {
                path = btrfs_alloc_path();
                if (path) {
                        ret = btrfs_lookup_file_extent(NULL, root, path,
                                                       inode->i_ino,
                                                       start, 0);
                        ordered_extent = btrfs_lookup_ordered_extent(inode,
                                                                     start);
                        if (!list_empty(&ordered_extent->list)) {
                                btrfs_release_path(root, path);
                                reada_csum(root, path, ordered_extent);
                        }
                        btrfs_free_path(path);
                }
        }

        trans = btrfs_join_transaction(root, 1);

        if (!ordered_extent)
                ordered_extent = btrfs_lookup_ordered_extent(inode, start);
        BUG_ON(!ordered_extent);
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
                goto nocow;

        lock_extent(io_tree, ordered_extent->file_offset,
                    ordered_extent->file_offset + ordered_extent->len - 1,
                    GFP_NOFS);

        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
                compressed = 1;
        if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
                BUG_ON(compressed);
                ret = btrfs_mark_extent_written(trans, root, inode,
                                                ordered_extent->file_offset,
                                                ordered_extent->file_offset +
                                                ordered_extent->len);
                BUG_ON(ret);
        } else {
                ret = insert_reserved_file_extent(trans, inode,
                                                  ordered_extent->file_offset,
                                                  ordered_extent->start,
                                                  ordered_extent->disk_len,
                                                  ordered_extent->len,
                                                  ordered_extent->len,
                                                  compressed, 0, 0,
                                                  BTRFS_FILE_EXTENT_REG);
                BUG_ON(ret);
        }
        unlock_extent(io_tree, ordered_extent->file_offset,
                      ordered_extent->file_offset + ordered_extent->len - 1,
                      GFP_NOFS);
nocow:
        add_pending_csums(trans, inode, ordered_extent->file_offset,
                          &ordered_extent->list);

        mutex_lock(&BTRFS_I(inode)->extent_mutex);
        btrfs_ordered_update_i_size(inode, ordered_extent);
        btrfs_update_inode(trans, root, inode);
        btrfs_remove_ordered_extent(inode, ordered_extent);
        mutex_unlock(&BTRFS_I(inode)->extent_mutex);

        /* once for us */
        btrfs_put_ordered_extent(ordered_extent);
        /* once for the tree */
        btrfs_put_ordered_extent(ordered_extent);

        btrfs_end_transaction(trans, root);
        return 0;
}

static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
                                       struct extent_state *state, int uptodate)
{
        return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
        struct page *page;
        u64 start;
        u64 len;
        u64 logical;
        unsigned long bio_flags;
        int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
                                struct page *page, u64 start, u64 end,
                                struct extent_state *state)
{
        struct io_failure_record *failrec = NULL;
        u64 private;
        struct extent_map *em;
        struct inode *inode = page->mapping->host;
        struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct bio *bio;
        int num_copies;
        int ret;
        int rw;
        u64 logical;

        ret = get_state_private(failure_tree, start, &private);
        if (ret) {
                failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
                if (!failrec)
                        return -ENOMEM;
                failrec->start = start;
                failrec->len = end - start + 1;
                failrec->last_mirror = 0;
                failrec->bio_flags = 0;

                spin_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, failrec->len);
                if (em->start > start || em->start + em->len < start) {
                        free_extent_map(em);
                        em = NULL;
                }
                spin_unlock(&em_tree->lock);

                if (!em || IS_ERR(em)) {
                        kfree(failrec);
                        return -EIO;
                }
                logical = start - em->start;
                logical = em->block_start + logical;
                if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                        logical = em->block_start;
                        failrec->bio_flags = EXTENT_BIO_COMPRESSED;
                }
                failrec->logical = logical;
                free_extent_map(em);
                set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
                                EXTENT_DIRTY, GFP_NOFS);
                set_state_private(failure_tree, start,
                                  (u64)(unsigned long)failrec);
        } else {
                failrec = (struct io_failure_record *)(unsigned long)private;
        }
        num_copies = btrfs_num_copies(
                              &BTRFS_I(inode)->root->fs_info->mapping_tree,
                              failrec->logical, failrec->len);
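        /*
         * Mirrors are numbered from 1; last_mirror records the copy about
         * to be tried.  Once it exceeds num_copies, every replica has been
         * attempted and the read fails for good with -EIO below.
         */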
        failrec->last_mirror++;
        if (!state) {
                spin_lock(&BTRFS_I(inode)->io_tree.lock);
                state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
                                                    failrec->start,
                                                    EXTENT_LOCKED);
                if (state && state->start != failrec->start)
                        state = NULL;
                spin_unlock(&BTRFS_I(inode)->io_tree.lock);
        }
        if (!state || failrec->last_mirror > num_copies) {
                set_state_private(failure_tree, failrec->start, 0);
                clear_extent_bits(failure_tree, failrec->start,
                                  failrec->start + failrec->len - 1,
                                  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
                kfree(failrec);
                return -EIO;
        }
        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_private = state;
        bio->bi_end_io = failed_bio->bi_end_io;
        bio->bi_sector = failrec->logical >> 9;
        bio->bi_bdev = failed_bio->bi_bdev;
        bio->bi_size = 0;

        bio_add_page(bio, page, failrec->len, start - page_offset(page));
        if (failed_bio->bi_rw & (1 << BIO_RW))
                rw = WRITE;
        else
                rw = READ;

        BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
                                                     failrec->last_mirror,
                                                     failrec->bio_flags);
        return 0;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
        u64 private;
        u64 private_failure;
        struct io_failure_record *failure;
        int ret;

        private = 0;
        if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
                             (u64)-1, 1, EXTENT_DIRTY)) {
                ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
                                        start, &private_failure);
                if (ret == 0) {
                        failure = (struct io_failure_record *)(unsigned long)
                                   private_failure;
                        set_state_private(&BTRFS_I(inode)->io_failure_tree,
                                          failure->start, 0);
                        clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
                                          failure->start,
                                          failure->start + failure->len - 1,
                                          EXTENT_DIRTY | EXTENT_LOCKED,
                                          GFP_NOFS);
                        kfree(failure);
                }
        }
        return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                                      struct extent_state *state)
{
        size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
        struct inode *inode = page->mapping->host;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        char *kaddr;
        u64 private = ~(u32)0;
        int ret;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u32 csum = ~(u32)0;

        if (PageChecked(page)) {
                ClearPageChecked(page);
                goto good;
        }
        if (btrfs_test_flag(inode, NODATASUM))
                return 0;

        if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
            test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
                clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
                                  GFP_NOFS);
                return 0;
        }

        if (state && state->start == start) {
                private = state->private;
                ret = 0;
        } else {
                ret = get_state_private(io_tree, start, &private);
        }
        kaddr = kmap_atomic(page, KM_USER0);
        if (ret)
                goto zeroit;
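
        /*
         * Recompute the crc32c over the page contents and fold it with
         * btrfs_csum_final; it must match the checksum that was stashed
         * in the io_tree's private field at read-submit time.
         */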
        csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
        btrfs_csum_final(csum, (char *)&csum);
        if (csum != private)
                goto zeroit;

        kunmap_atomic(kaddr, KM_USER0);
good:
        /* if the io failure tree for this inode is non-empty,
         * check to see if we've recovered from a failed IO
         */
        btrfs_clean_io_failures(inode, start);
        return 0;

zeroit:
        printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
               "private %llu\n", page->mapping->host->i_ino,
               (unsigned long long)start, csum,
               (unsigned long long)private);
        memset(kaddr + offset, 1, end - start + 1);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        if (private == 0)
                return 0;
        return -EIO;
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;

        spin_lock(&root->list_lock);

        /* already on the orphan list, we're good */
        if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
                spin_unlock(&root->list_lock);
                return 0;
        }

        list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);

        spin_unlock(&root->list_lock);

        /*
         * insert an orphan item to track this unlinked/truncated file
         */
        ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);

        return ret;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;

        spin_lock(&root->list_lock);

        if (list_empty(&BTRFS_I(inode)->i_orphan)) {
                spin_unlock(&root->list_lock);
                return 0;
        }

        list_del_init(&BTRFS_I(inode)->i_orphan);
        if (!trans) {
                spin_unlock(&root->list_lock);
                return 0;
        }

        spin_unlock(&root->list_lock);

        ret = btrfs_del_orphan_item(trans, root, inode->i_ino);

        return ret;
}
1893 * this cleans up any orphans that may be left on the list from the last use
1894 * of this root.
1896 void btrfs_orphan_cleanup(struct btrfs_root *root)
1898 struct btrfs_path *path;
1899 struct extent_buffer *leaf;
1900 struct btrfs_item *item;
1901 struct btrfs_key key, found_key;
1902 struct btrfs_trans_handle *trans;
1903 struct inode *inode;
1904 int ret = 0, nr_unlink = 0, nr_truncate = 0;
1906 path = btrfs_alloc_path();
1907 if (!path)
1908 return;
1909 path->reada = -1;
1911 key.objectid = BTRFS_ORPHAN_OBJECTID;
1912 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1913 key.offset = (u64)-1;
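/*
 * editor's note: offset (u64)-1 deliberately overshoots; the ret > 0
 * branch below backs up one slot so we land on the highest orphan item
 * that actually exists.
 */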
1916 while (1) {
1917 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1918 if (ret < 0) {
1919 printk(KERN_ERR "Error searching slot for orphan: %d\n",
1920 ret);
1921 break;
1924 /*
1925 * ret == 0 means we found what we were searching for, which is
1926 * weird, but possible, so only adjust the path if we didn't
1927 * find the key, and check whether we have items that match
1928 */
1929 if (ret > 0) {
1930 if (path->slots[0] == 0)
1931 break;
1932 path->slots[0]--;
1935 /* pull out the item */
1936 leaf = path->nodes[0];
1937 item = btrfs_item_nr(leaf, path->slots[0]);
1938 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1940 /* make sure the item matches what we want */
1941 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1942 break;
1943 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1944 break;
1946 /* release the path since we're done with it */
1947 btrfs_release_path(root, path);
1949 /*
1950 * this is basically btrfs_lookup, without the root-crossing
1951 * step. the inode number is stored in the offset field of the
1952 * orphan item.
1953 */
1954 inode = btrfs_iget_locked(root->fs_info->sb,
1955 found_key.offset, root);
1956 if (!inode)
1957 break;
1959 if (inode->i_state & I_NEW) {
1960 BTRFS_I(inode)->root = root;
1962 /* have to set the location manually */
1963 BTRFS_I(inode)->location.objectid = inode->i_ino;
1964 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
1965 BTRFS_I(inode)->location.offset = 0;
1967 btrfs_read_locked_inode(inode);
1968 unlock_new_inode(inode);
1971 /*
1972 * add this inode to the orphan list so btrfs_orphan_del does
1973 * the proper thing when we hit it
1974 */
1975 spin_lock(&root->list_lock);
1976 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1977 spin_unlock(&root->list_lock);
1979 /*
1980 * if this is a bad inode, it means we actually succeeded in
1981 * removing the inode, but not the orphan record, which means
1982 * we need to delete the orphan manually since iput will just
1983 * do a destroy_inode
1984 */
1985 if (is_bad_inode(inode)) {
1986 trans = btrfs_start_transaction(root, 1);
1987 btrfs_orphan_del(trans, inode);
1988 btrfs_end_transaction(trans, root);
1989 iput(inode);
1990 continue;
1993 /* if we have links, this was a truncate, let's do that */
1994 if (inode->i_nlink) {
1995 nr_truncate++;
1996 btrfs_truncate(inode);
1997 } else {
1998 nr_unlink++;
2001 /* this will do delete_inode and everything for us */
2002 iput(inode);
2005 if (nr_unlink)
2006 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2007 if (nr_truncate)
2008 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2010 btrfs_free_path(path);
2013 /*
2014 * read an inode from the btree into the in-memory inode
2015 */
2016 void btrfs_read_locked_inode(struct inode *inode)
2018 struct btrfs_path *path;
2019 struct extent_buffer *leaf;
2020 struct btrfs_inode_item *inode_item;
2021 struct btrfs_timespec *tspec;
2022 struct btrfs_root *root = BTRFS_I(inode)->root;
2023 struct btrfs_key location;
2024 u64 alloc_group_block;
2025 u32 rdev;
2026 int ret;
2028 path = btrfs_alloc_path();
2029 BUG_ON(!path);
2030 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2032 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2033 if (ret)
2034 goto make_bad;
2036 leaf = path->nodes[0];
2037 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2038 struct btrfs_inode_item);
2040 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2041 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2042 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2043 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2044 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2046 tspec = btrfs_inode_atime(inode_item);
2047 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2048 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2050 tspec = btrfs_inode_mtime(inode_item);
2051 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2052 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2054 tspec = btrfs_inode_ctime(inode_item);
2055 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2056 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2058 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2059 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2060 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2061 inode->i_generation = BTRFS_I(inode)->generation;
2062 inode->i_rdev = 0;
2063 rdev = btrfs_inode_rdev(leaf, inode_item);
2065 BTRFS_I(inode)->index_cnt = (u64)-1;
2066 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2068 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2070 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2071 alloc_group_block, 0);
2072 btrfs_free_path(path);
2073 inode_item = NULL;
2075 switch (inode->i_mode & S_IFMT) {
2076 case S_IFREG:
2077 inode->i_mapping->a_ops = &btrfs_aops;
2078 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2079 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2080 inode->i_fop = &btrfs_file_operations;
2081 inode->i_op = &btrfs_file_inode_operations;
2082 break;
2083 case S_IFDIR:
2084 inode->i_fop = &btrfs_dir_file_operations;
2085 if (root == root->fs_info->tree_root)
2086 inode->i_op = &btrfs_dir_ro_inode_operations;
2087 else
2088 inode->i_op = &btrfs_dir_inode_operations;
2089 break;
2090 case S_IFLNK:
2091 inode->i_op = &btrfs_symlink_inode_operations;
2092 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2093 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2094 break;
2095 default:
2096 inode->i_op = &btrfs_special_inode_operations;
2097 init_special_inode(inode, inode->i_mode, rdev);
2098 break;
2100 return;
2102 make_bad:
2103 btrfs_free_path(path);
2104 make_bad_inode(inode);
2107 /*
2108 * given a leaf and an inode, copy the inode fields into the leaf
2109 */
2110 static void fill_inode_item(struct btrfs_trans_handle *trans,
2111 struct extent_buffer *leaf,
2112 struct btrfs_inode_item *item,
2113 struct inode *inode)
2115 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2116 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2117 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2118 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2119 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2121 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2122 inode->i_atime.tv_sec);
2123 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2124 inode->i_atime.tv_nsec);
2126 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2127 inode->i_mtime.tv_sec);
2128 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2129 inode->i_mtime.tv_nsec);
2131 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2132 inode->i_ctime.tv_sec);
2133 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2134 inode->i_ctime.tv_nsec);
2136 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2137 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2138 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2139 btrfs_set_inode_transid(leaf, item, trans->transid);
2140 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2141 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2142 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2145 /*
2146 * copy everything in the in-memory inode into the btree.
2147 */
2148 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2149 struct btrfs_root *root, struct inode *inode)
2151 struct btrfs_inode_item *inode_item;
2152 struct btrfs_path *path;
2153 struct extent_buffer *leaf;
2154 int ret;
2156 path = btrfs_alloc_path();
2157 BUG_ON(!path);
2158 path->leave_spinning = 1;
2159 ret = btrfs_lookup_inode(trans, root, path,
2160 &BTRFS_I(inode)->location, 1);
2161 if (ret) {
2162 if (ret > 0)
2163 ret = -ENOENT;
2164 goto failed;
2167 btrfs_unlock_up_safe(path, 1);
2168 leaf = path->nodes[0];
2169 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2170 struct btrfs_inode_item);
2172 fill_inode_item(trans, leaf, inode_item, inode);
2173 btrfs_mark_buffer_dirty(leaf);
2174 btrfs_set_inode_last_trans(trans, inode);
2175 ret = 0;
2176 failed:
2177 btrfs_free_path(path);
2178 return ret;
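/*
 * editor's note: this is the usual btrfs update idiom; the inode item is
 * edited in place inside the leaf's extent buffer and the leaf is marked
 * dirty, so the transaction commit writes it out with no separate step.
 */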
2182 /*
2183 * unlink helper that gets used here in inode.c and in the tree logging
2184 * recovery code. It removes a link in a directory with a given name, and
2185 * also drops the back refs from the inode to the directory
2186 */
2187 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2188 struct btrfs_root *root,
2189 struct inode *dir, struct inode *inode,
2190 const char *name, int name_len)
2192 struct btrfs_path *path;
2193 int ret = 0;
2194 struct extent_buffer *leaf;
2195 struct btrfs_dir_item *di;
2196 struct btrfs_key key;
2197 u64 index;
2199 path = btrfs_alloc_path();
2200 if (!path) {
2201 ret = -ENOMEM;
2202 goto err;
2205 path->leave_spinning = 1;
2206 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2207 name, name_len, -1);
2208 if (IS_ERR(di)) {
2209 ret = PTR_ERR(di);
2210 goto err;
2212 if (!di) {
2213 ret = -ENOENT;
2214 goto err;
2216 leaf = path->nodes[0];
2217 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2218 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2219 if (ret)
2220 goto err;
2221 btrfs_release_path(root, path);
2223 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2224 inode->i_ino,
2225 dir->i_ino, &index);
2226 if (ret) {
2227 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2228 "inode %lu parent %lu\n", name_len, name,
2229 inode->i_ino, dir->i_ino);
2230 goto err;
2233 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2234 index, name, name_len, -1);
2235 if (IS_ERR(di)) {
2236 ret = PTR_ERR(di);
2237 goto err;
2239 if (!di) {
2240 ret = -ENOENT;
2241 goto err;
2243 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2244 btrfs_release_path(root, path);
2246 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2247 inode, dir->i_ino);
2248 BUG_ON(ret != 0 && ret != -ENOENT);
2250 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2251 dir, index);
2252 BUG_ON(ret);
2253 err:
2254 btrfs_free_path(path);
2255 if (ret)
2256 goto out;
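/*
 * editor's note: directory i_size counts the name bytes of both the
 * DIR_ITEM and the DIR_INDEX entry for each name, hence the "* 2" here
 * and the matching "+ name_len * 2" in btrfs_add_link() below.
 */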
2258 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2259 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2260 btrfs_update_inode(trans, root, dir);
2261 btrfs_drop_nlink(inode);
2262 ret = btrfs_update_inode(trans, root, inode);
2263 dir->i_sb->s_dirt = 1;
2264 out:
2265 return ret;
2268 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2270 struct btrfs_root *root;
2271 struct btrfs_trans_handle *trans;
2272 struct inode *inode = dentry->d_inode;
2273 int ret;
2274 unsigned long nr = 0;
2276 root = BTRFS_I(dir)->root;
2278 trans = btrfs_start_transaction(root, 1);
2280 btrfs_set_trans_block_group(trans, dir);
2282 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2284 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2285 dentry->d_name.name, dentry->d_name.len);
2287 if (inode->i_nlink == 0)
2288 ret = btrfs_orphan_add(trans, inode);
2290 nr = trans->blocks_used;
2292 btrfs_end_transaction_throttle(trans, root);
2293 btrfs_btree_balance_dirty(root, nr);
2294 return ret;
2297 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2299 struct inode *inode = dentry->d_inode;
2300 int err = 0;
2301 int ret;
2302 struct btrfs_root *root = BTRFS_I(dir)->root;
2303 struct btrfs_trans_handle *trans;
2304 unsigned long nr = 0;
2306 /*
2307 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2308 * the root of a subvolume or snapshot
2309 */
2310 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2311 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2312 return -ENOTEMPTY;
2315 trans = btrfs_start_transaction(root, 1);
2316 btrfs_set_trans_block_group(trans, dir);
2318 err = btrfs_orphan_add(trans, inode);
2319 if (err)
2320 goto fail_trans;
2322 /* now the directory is empty */
2323 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2324 dentry->d_name.name, dentry->d_name.len);
2325 if (!err)
2326 btrfs_i_size_write(inode, 0);
2328 fail_trans:
2329 nr = trans->blocks_used;
2330 ret = btrfs_end_transaction_throttle(trans, root);
2331 btrfs_btree_balance_dirty(root, nr);
2333 if (ret && !err)
2334 err = ret;
2335 return err;
2338 #if 0
2339 /*
2340 * when truncating bytes in a file, it is possible to avoid reading
2341 * the leaves that contain only checksum items. This can be the
2342 * majority of the IO required to delete a large file, but it must
2343 * be done carefully.
2344 *
2345 * The keys in the level just above the leaves are checked to make sure
2346 * the lowest key in a given leaf is a csum key, and starts at an offset
2347 * after the new size.
2348 *
2349 * Then the key for the next leaf is checked to make sure it also has
2350 * a checksum item for the same file. If it does, we know our target leaf
2351 * contains only checksum items, and it can be safely freed without reading
2352 * it.
2353 *
2354 * This is just an optimization targeted at large files. It may do
2355 * nothing. It will return 0 unless things went badly.
2356 */
2357 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2358 struct btrfs_root *root,
2359 struct btrfs_path *path,
2360 struct inode *inode, u64 new_size)
2362 struct btrfs_key key;
2363 int ret;
2364 int nritems;
2365 struct btrfs_key found_key;
2366 struct btrfs_key other_key;
2367 struct btrfs_leaf_ref *ref;
2368 u64 leaf_gen;
2369 u64 leaf_start;
2371 path->lowest_level = 1;
2372 key.objectid = inode->i_ino;
2373 key.type = BTRFS_CSUM_ITEM_KEY;
2374 key.offset = new_size;
2375 again:
2376 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2377 if (ret < 0)
2378 goto out;
2380 if (path->nodes[1] == NULL) {
2381 ret = 0;
2382 goto out;
2384 ret = 0;
2385 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2386 nritems = btrfs_header_nritems(path->nodes[1]);
2388 if (!nritems)
2389 goto out;
2391 if (path->slots[1] >= nritems)
2392 goto next_node;
2394 /* did we find a key greater than anything we want to delete? */
2395 if (found_key.objectid > inode->i_ino ||
2396 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2397 goto out;
2399 /* we check the next key in the node to make sure the leaf contains
2400 * only checksum items. This comparison doesn't work if our
2401 * leaf is the last one in the node
2402 */
2403 if (path->slots[1] + 1 >= nritems) {
2404 next_node:
2405 /* search forward from the last key in the node, this
2406 * will bring us into the next node in the tree
2407 */
2408 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2410 /* unlikely, but we inc below, so check to be safe */
2411 if (found_key.offset == (u64)-1)
2412 goto out;
2414 /* search_forward needs a path with locks held, do the
2415 * search again for the original key. It is possible
2416 * this will race with a balance and return a path that
2417 * we could modify, but this drop is just an optimization
2418 * and is allowed to miss some leaves.
2419 */
2420 btrfs_release_path(root, path);
2421 found_key.offset++;
2423 /* setup a max key for search_forward */
2424 other_key.offset = (u64)-1;
2425 other_key.type = key.type;
2426 other_key.objectid = key.objectid;
2428 path->keep_locks = 1;
2429 ret = btrfs_search_forward(root, &found_key, &other_key,
2430 path, 0, 0);
2431 path->keep_locks = 0;
2432 if (ret || found_key.objectid != key.objectid ||
2433 found_key.type != key.type) {
2434 ret = 0;
2435 goto out;
2438 key.offset = found_key.offset;
2439 btrfs_release_path(root, path);
2440 cond_resched();
2441 goto again;
2444 /* we know there's one more slot after us in the tree,
2445 * read that key so we can verify it is also a checksum item
2446 */
2447 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2449 if (found_key.objectid < inode->i_ino)
2450 goto next_key;
2452 if (found_key.type != key.type || found_key.offset < new_size)
2453 goto next_key;
2455 /*
2456 * if the key for the next leaf isn't a csum key from this objectid,
2457 * we can't be sure there aren't good items inside this leaf.
2458 * Bail out
2459 */
2460 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2461 goto out;
2463 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2464 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2465 /*
2466 * it is safe to delete this leaf, it contains only
2467 * csum items from this inode at an offset >= new_size
2468 */
2469 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2470 BUG_ON(ret);
2472 if (root->ref_cows && leaf_gen < trans->transid) {
2473 ref = btrfs_alloc_leaf_ref(root, 0);
2474 if (ref) {
2475 ref->root_gen = root->root_key.offset;
2476 ref->bytenr = leaf_start;
2477 ref->owner = 0;
2478 ref->generation = leaf_gen;
2479 ref->nritems = 0;
2481 btrfs_sort_leaf_ref(ref);
2483 ret = btrfs_add_leaf_ref(root, ref, 0);
2484 WARN_ON(ret);
2485 btrfs_free_leaf_ref(root, ref);
2486 } else {
2487 WARN_ON(1);
2490 next_key:
2491 btrfs_release_path(root, path);
2493 if (other_key.objectid == inode->i_ino &&
2494 other_key.type == key.type && other_key.offset > key.offset) {
2495 key.offset = other_key.offset;
2496 cond_resched();
2497 goto again;
2499 ret = 0;
2500 out:
2501 /* fixup any changes we've made to the path */
2502 path->lowest_level = 0;
2503 path->keep_locks = 0;
2504 btrfs_release_path(root, path);
2505 return ret;
2508 #endif
2510 /*
2511 * this can truncate away extent items, csum items and directory items.
2512 * It starts at a high offset and removes keys until it can't find
2513 * any higher than new_size
2514 *
2515 * csum items that cross the new i_size are truncated to the new size
2516 * as well.
2517 *
2518 * min_type is the minimum key type to truncate down to. If set to 0, this
2519 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2520 */
2521 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2522 struct btrfs_root *root,
2523 struct inode *inode,
2524 u64 new_size, u32 min_type)
2526 int ret;
2527 struct btrfs_path *path;
2528 struct btrfs_key key;
2529 struct btrfs_key found_key;
2530 u32 found_type = (u8)-1;
2531 struct extent_buffer *leaf;
2532 struct btrfs_file_extent_item *fi;
2533 u64 extent_start = 0;
2534 u64 extent_num_bytes = 0;
2535 u64 item_end = 0;
2536 u64 root_gen = 0;
2537 u64 root_owner = 0;
2538 int found_extent;
2539 int del_item;
2540 int pending_del_nr = 0;
2541 int pending_del_slot = 0;
2542 int extent_type = -1;
2543 int encoding;
2544 u64 mask = root->sectorsize - 1;
2546 if (root->ref_cows)
2547 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2548 path = btrfs_alloc_path();
2549 BUG_ON(!path);
2550 path->reada = -1;
2552 /* FIXME, add redo link to tree so we don't leak on crash */
2553 key.objectid = inode->i_ino;
2554 key.offset = (u64)-1;
2555 key.type = (u8)-1;
2557 search_again:
2558 path->leave_spinning = 1;
2559 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2560 if (ret < 0)
2561 goto error;
2563 if (ret > 0) {
2564 /* there are no items in the tree for us to truncate, we're
2565 * done
2566 */
2567 if (path->slots[0] == 0) {
2568 ret = 0;
2569 goto error;
2571 path->slots[0]--;
2574 while (1) {
2575 fi = NULL;
2576 leaf = path->nodes[0];
2577 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2578 found_type = btrfs_key_type(&found_key);
2579 encoding = 0;
2581 if (found_key.objectid != inode->i_ino)
2582 break;
2584 if (found_type < min_type)
2585 break;
2587 item_end = found_key.offset;
2588 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2589 fi = btrfs_item_ptr(leaf, path->slots[0],
2590 struct btrfs_file_extent_item);
2591 extent_type = btrfs_file_extent_type(leaf, fi);
2592 encoding = btrfs_file_extent_compression(leaf, fi);
2593 encoding |= btrfs_file_extent_encryption(leaf, fi);
2594 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2596 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2597 item_end +=
2598 btrfs_file_extent_num_bytes(leaf, fi);
2599 } else {
2600 item_end += btrfs_file_extent_inline_len(leaf,
2601 fi);
2603 item_end--;
2605 if (item_end < new_size) {
2606 if (found_type == BTRFS_DIR_ITEM_KEY)
2607 found_type = BTRFS_INODE_ITEM_KEY;
2608 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2609 found_type = BTRFS_EXTENT_DATA_KEY;
2610 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2611 found_type = BTRFS_XATTR_ITEM_KEY;
2612 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2613 found_type = BTRFS_INODE_REF_KEY;
2614 else if (found_type)
2615 found_type--;
2616 else
2617 break;
2618 btrfs_set_key_type(&key, found_type);
2619 goto next;
2621 if (found_key.offset >= new_size)
2622 del_item = 1;
2623 else
2624 del_item = 0;
2625 found_extent = 0;
2627 /* FIXME, shrink the extent if the ref count is only 1 */
2628 if (found_type != BTRFS_EXTENT_DATA_KEY)
2629 goto delete;
2631 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2632 u64 num_dec;
2633 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2634 if (!del_item && !encoding) {
2635 u64 orig_num_bytes =
2636 btrfs_file_extent_num_bytes(leaf, fi);
2637 extent_num_bytes = new_size -
2638 found_key.offset + root->sectorsize - 1;
2639 extent_num_bytes = extent_num_bytes &
2640 ~((u64)root->sectorsize - 1);
2641 btrfs_set_file_extent_num_bytes(leaf, fi,
2642 extent_num_bytes);
2643 num_dec = (orig_num_bytes -
2644 extent_num_bytes);
2645 if (root->ref_cows && extent_start != 0)
2646 inode_sub_bytes(inode, num_dec);
2647 btrfs_mark_buffer_dirty(leaf);
2648 } else {
2649 extent_num_bytes =
2650 btrfs_file_extent_disk_num_bytes(leaf,
2651 fi);
2652 /* FIXME blocksize != 4096 */
2653 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2654 if (extent_start != 0) {
2655 found_extent = 1;
2656 if (root->ref_cows)
2657 inode_sub_bytes(inode, num_dec);
2659 root_gen = btrfs_header_generation(leaf);
2660 root_owner = btrfs_header_owner(leaf);
2662 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2663 /*
2664 * we can't truncate inline items that have had
2665 * special encodings
2666 */
2667 if (!del_item &&
2668 btrfs_file_extent_compression(leaf, fi) == 0 &&
2669 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2670 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2671 u32 size = new_size - found_key.offset;
2673 if (root->ref_cows) {
2674 inode_sub_bytes(inode, item_end + 1 -
2675 new_size);
2677 size =
2678 btrfs_file_extent_calc_inline_size(size);
2679 ret = btrfs_truncate_item(trans, root, path,
2680 size, 1);
2681 BUG_ON(ret);
2682 } else if (root->ref_cows) {
2683 inode_sub_bytes(inode, item_end + 1 -
2684 found_key.offset);
2687 delete:
2688 if (del_item) {
2689 if (!pending_del_nr) {
2690 /* no pending yet, add ourselves */
2691 pending_del_slot = path->slots[0];
2692 pending_del_nr = 1;
2693 } else if (pending_del_nr &&
2694 path->slots[0] + 1 == pending_del_slot) {
2695 /* hop on the pending chunk */
2696 pending_del_nr++;
2697 pending_del_slot = path->slots[0];
2698 } else {
2699 BUG();
2701 } else {
2702 break;
2704 if (found_extent) {
2705 btrfs_set_path_blocking(path);
2706 ret = btrfs_free_extent(trans, root, extent_start,
2707 extent_num_bytes,
2708 leaf->start, root_owner,
2709 root_gen, inode->i_ino, 0);
2710 BUG_ON(ret);
2712 next:
2713 if (path->slots[0] == 0) {
2714 if (pending_del_nr)
2715 goto del_pending;
2716 btrfs_release_path(root, path);
2717 if (found_type == BTRFS_INODE_ITEM_KEY)
2718 break;
2719 goto search_again;
2722 path->slots[0]--;
2723 if (pending_del_nr &&
2724 path->slots[0] + 1 != pending_del_slot) {
2725 struct btrfs_key debug;
2726 del_pending:
2727 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2728 pending_del_slot);
2729 ret = btrfs_del_items(trans, root, path,
2730 pending_del_slot,
2731 pending_del_nr);
2732 BUG_ON(ret);
2733 pending_del_nr = 0;
2734 btrfs_release_path(root, path);
2735 if (found_type == BTRFS_INODE_ITEM_KEY)
2736 break;
2737 goto search_again;
2740 ret = 0;
2741 error:
2742 if (pending_del_nr) {
2743 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2744 pending_del_nr);
2746 btrfs_free_path(path);
2747 inode->i_sb->s_dirt = 1;
2748 return ret;
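/*
 * editor's sketch: btrfs_delete_inode() below drives this with
 * new_size == 0 and min_type == 0 to drop every item the inode owns,
 * INODE_ITEM included (calls copied from that function):
 */
#if 0
btrfs_i_size_write(inode, 0);
trans = btrfs_join_transaction(root, 1);
ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
#endif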
2751 /*
2752 * taken from block_truncate_page, but does COW as it zeros out
2753 * any bytes left in the last page in the file.
2754 */
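/*
 * editor's note: the locking dance below (lock_extent, look for a
 * pending ordered extent, and if one exists unlock, wait on it and
 * retry from "again") is the standard pattern for dirtying a page
 * without racing against data=ordered completion.
 */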
2755 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2757 struct inode *inode = mapping->host;
2758 struct btrfs_root *root = BTRFS_I(inode)->root;
2759 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2760 struct btrfs_ordered_extent *ordered;
2761 char *kaddr;
2762 u32 blocksize = root->sectorsize;
2763 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2764 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2765 struct page *page;
2766 int ret = 0;
2767 u64 page_start;
2768 u64 page_end;
2770 if ((offset & (blocksize - 1)) == 0)
2771 goto out;
2773 ret = -ENOMEM;
2774 again:
2775 page = grab_cache_page(mapping, index);
2776 if (!page)
2777 goto out;
2779 page_start = page_offset(page);
2780 page_end = page_start + PAGE_CACHE_SIZE - 1;
2782 if (!PageUptodate(page)) {
2783 ret = btrfs_readpage(NULL, page);
2784 lock_page(page);
2785 if (page->mapping != mapping) {
2786 unlock_page(page);
2787 page_cache_release(page);
2788 goto again;
2790 if (!PageUptodate(page)) {
2791 ret = -EIO;
2792 goto out_unlock;
2795 wait_on_page_writeback(page);
2797 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2798 set_page_extent_mapped(page);
2800 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2801 if (ordered) {
2802 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2803 unlock_page(page);
2804 page_cache_release(page);
2805 btrfs_start_ordered_extent(inode, ordered, 1);
2806 btrfs_put_ordered_extent(ordered);
2807 goto again;
2810 btrfs_set_extent_delalloc(inode, page_start, page_end);
2811 ret = 0;
2812 if (offset != PAGE_CACHE_SIZE) {
2813 kaddr = kmap(page);
2814 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2815 flush_dcache_page(page);
2816 kunmap(page);
2818 ClearPageChecked(page);
2819 set_page_dirty(page);
2820 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2822 out_unlock:
2823 unlock_page(page);
2824 page_cache_release(page);
2825 out:
2826 return ret;
2829 int btrfs_cont_expand(struct inode *inode, loff_t size)
2831 struct btrfs_trans_handle *trans;
2832 struct btrfs_root *root = BTRFS_I(inode)->root;
2833 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2834 struct extent_map *em;
2835 u64 mask = root->sectorsize - 1;
2836 u64 hole_start = (inode->i_size + mask) & ~mask;
2837 u64 block_end = (size + mask) & ~mask;
2838 u64 last_byte;
2839 u64 cur_offset;
2840 u64 hole_size;
2841 int err;
2843 if (size <= hole_start)
2844 return 0;
2846 err = btrfs_check_metadata_free_space(root);
2847 if (err)
2848 return err;
2850 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2852 while (1) {
2853 struct btrfs_ordered_extent *ordered;
2854 btrfs_wait_ordered_range(inode, hole_start,
2855 block_end - hole_start);
2856 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2857 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2858 if (!ordered)
2859 break;
2860 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2861 btrfs_put_ordered_extent(ordered);
2864 trans = btrfs_start_transaction(root, 1);
2865 btrfs_set_trans_block_group(trans, inode);
2867 cur_offset = hole_start;
2868 while (1) {
2869 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2870 block_end - cur_offset, 0);
2871 BUG_ON(IS_ERR(em) || !em);
2872 last_byte = min(extent_map_end(em), block_end);
2873 last_byte = (last_byte + mask) & ~mask;
2874 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2875 u64 hint_byte = 0;
2876 hole_size = last_byte - cur_offset;
2877 err = btrfs_drop_extents(trans, root, inode,
2878 cur_offset,
2879 cur_offset + hole_size,
2880 cur_offset, &hint_byte);
2881 if (err)
2882 break;
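/* editor's note: disk bytenr 0 below marks the new extent as a hole */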
2883 err = btrfs_insert_file_extent(trans, root,
2884 inode->i_ino, cur_offset, 0,
2885 0, hole_size, 0, hole_size,
2886 0, 0, 0);
2887 btrfs_drop_extent_cache(inode, hole_start,
2888 last_byte - 1, 0);
2890 free_extent_map(em);
2891 cur_offset = last_byte;
2892 if (err || cur_offset >= block_end)
2893 break;
2896 btrfs_end_transaction(trans, root);
2897 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2898 return err;
2901 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2903 struct inode *inode = dentry->d_inode;
2904 int err;
2906 err = inode_change_ok(inode, attr);
2907 if (err)
2908 return err;
2910 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
2911 if (attr->ia_size > inode->i_size) {
2912 err = btrfs_cont_expand(inode, attr->ia_size);
2913 if (err)
2914 return err;
2915 } else if (inode->i_size > 0 &&
2916 attr->ia_size == 0) {
2918 /* we're truncating a file that used to have good
2919 * data down to zero. Make sure it gets into
2920 * the ordered flush list so that any new writes
2921 * get down to disk quickly.
2922 */
2923 BTRFS_I(inode)->ordered_data_close = 1;
2927 err = inode_setattr(inode, attr);
2929 if (!err && ((attr->ia_valid & ATTR_MODE)))
2930 err = btrfs_acl_chmod(inode);
2931 return err;
2934 void btrfs_delete_inode(struct inode *inode)
2936 struct btrfs_trans_handle *trans;
2937 struct btrfs_root *root = BTRFS_I(inode)->root;
2938 unsigned long nr;
2939 int ret;
2941 truncate_inode_pages(&inode->i_data, 0);
2942 if (is_bad_inode(inode)) {
2943 btrfs_orphan_del(NULL, inode);
2944 goto no_delete;
2946 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2948 btrfs_i_size_write(inode, 0);
2949 trans = btrfs_join_transaction(root, 1);
2951 btrfs_set_trans_block_group(trans, inode);
2952 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2953 if (ret) {
2954 btrfs_orphan_del(NULL, inode);
2955 goto no_delete_lock;
2958 btrfs_orphan_del(trans, inode);
2960 nr = trans->blocks_used;
2961 clear_inode(inode);
2963 btrfs_end_transaction(trans, root);
2964 btrfs_btree_balance_dirty(root, nr);
2965 return;
2967 no_delete_lock:
2968 nr = trans->blocks_used;
2969 btrfs_end_transaction(trans, root);
2970 btrfs_btree_balance_dirty(root, nr);
2971 no_delete:
2972 clear_inode(inode);
2975 /*
2976 * this returns the key found in the dir entry in the location pointer.
2977 * If no dir entries were found, location->objectid is 0.
2978 */
2979 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2980 struct btrfs_key *location)
2982 const char *name = dentry->d_name.name;
2983 int namelen = dentry->d_name.len;
2984 struct btrfs_dir_item *di;
2985 struct btrfs_path *path;
2986 struct btrfs_root *root = BTRFS_I(dir)->root;
2987 int ret = 0;
2989 path = btrfs_alloc_path();
2990 BUG_ON(!path);
2992 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2993 namelen, 0);
2994 if (IS_ERR(di))
2995 ret = PTR_ERR(di);
2997 if (!di || IS_ERR(di))
2998 goto out_err;
3000 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3001 out:
3002 btrfs_free_path(path);
3003 return ret;
3004 out_err:
3005 location->objectid = 0;
3006 goto out;
3009 /*
3010 * when we hit a tree root in a directory, the btrfs part of the inode
3011 * needs to be changed to reflect the root directory of the tree root. This
3012 * is kind of like crossing a mount point.
3013 */
3014 static int fixup_tree_root_location(struct btrfs_root *root,
3015 struct btrfs_key *location,
3016 struct btrfs_root **sub_root,
3017 struct dentry *dentry)
3019 struct btrfs_root_item *ri;
3021 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
3022 return 0;
3023 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
3024 return 0;
3026 *sub_root = btrfs_read_fs_root(root->fs_info, location,
3027 dentry->d_name.name,
3028 dentry->d_name.len);
3029 if (IS_ERR(*sub_root))
3030 return PTR_ERR(*sub_root);
3032 ri = &(*sub_root)->root_item;
3033 location->objectid = btrfs_root_dirid(ri);
3034 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3035 location->offset = 0;
3037 return 0;
3040 static noinline void init_btrfs_i(struct inode *inode)
3042 struct btrfs_inode *bi = BTRFS_I(inode);
3044 bi->i_acl = NULL;
3045 bi->i_default_acl = NULL;
3047 bi->generation = 0;
3048 bi->sequence = 0;
3049 bi->last_trans = 0;
3050 bi->logged_trans = 0;
3051 bi->delalloc_bytes = 0;
3052 bi->reserved_bytes = 0;
3053 bi->disk_i_size = 0;
3054 bi->flags = 0;
3055 bi->index_cnt = (u64)-1;
3056 bi->last_unlink_trans = 0;
3057 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3058 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3059 inode->i_mapping, GFP_NOFS);
3060 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3061 inode->i_mapping, GFP_NOFS);
3062 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3063 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3064 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3065 mutex_init(&BTRFS_I(inode)->extent_mutex);
3066 mutex_init(&BTRFS_I(inode)->log_mutex);
3069 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3071 struct btrfs_iget_args *args = p;
3072 inode->i_ino = args->ino;
3073 init_btrfs_i(inode);
3074 BTRFS_I(inode)->root = args->root;
3075 btrfs_set_inode_space_info(args->root, inode);
3076 return 0;
3079 static int btrfs_find_actor(struct inode *inode, void *opaque)
3081 struct btrfs_iget_args *args = opaque;
3082 return args->ino == inode->i_ino &&
3083 args->root == BTRFS_I(inode)->root;
3086 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
3087 struct btrfs_root *root, int wait)
3089 struct inode *inode;
3090 struct btrfs_iget_args args;
3091 args.ino = objectid;
3092 args.root = root;
3094 if (wait) {
3095 inode = ilookup5(s, objectid, btrfs_find_actor,
3096 (void *)&args);
3097 } else {
3098 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
3099 (void *)&args);
3101 return inode;
3104 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
3105 struct btrfs_root *root)
3107 struct inode *inode;
3108 struct btrfs_iget_args args;
3109 args.ino = objectid;
3110 args.root = root;
3112 inode = iget5_locked(s, objectid, btrfs_find_actor,
3113 btrfs_init_locked_inode,
3114 (void *)&args);
3115 return inode;
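/*
 * editor's sketch (hypothetical caller): the actor/init pair above keys
 * the inode hash on the (objectid, root) pair, so the same inode number
 * may exist in several subvolume roots without colliding:
 */
#if 0
struct btrfs_iget_args args = { .ino = objectid, .root = root };
struct inode *inode = iget5_locked(sb, objectid, btrfs_find_actor,
				   btrfs_init_locked_inode, (void *)&args);
#endif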
3118 /* Get an inode object given its location and corresponding root.
3119 * Sets *is_new to tell the caller whether the inode was read from disk
3120 */
3121 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3122 struct btrfs_root *root, int *is_new)
3124 struct inode *inode;
3126 inode = btrfs_iget_locked(s, location->objectid, root);
3127 if (!inode)
3128 return ERR_PTR(-EACCES);
3130 if (inode->i_state & I_NEW) {
3131 BTRFS_I(inode)->root = root;
3132 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3133 btrfs_read_locked_inode(inode);
3134 unlock_new_inode(inode);
3135 if (is_new)
3136 *is_new = 1;
3137 } else {
3138 if (is_new)
3139 *is_new = 0;
3142 return inode;
3145 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3147 struct inode *inode;
3148 struct btrfs_inode *bi = BTRFS_I(dir);
3149 struct btrfs_root *root = bi->root;
3150 struct btrfs_root *sub_root = root;
3151 struct btrfs_key location;
3152 int ret, new;
3154 if (dentry->d_name.len > BTRFS_NAME_LEN)
3155 return ERR_PTR(-ENAMETOOLONG);
3157 ret = btrfs_inode_by_name(dir, dentry, &location);
3159 if (ret < 0)
3160 return ERR_PTR(ret);
3162 inode = NULL;
3163 if (location.objectid) {
3164 ret = fixup_tree_root_location(root, &location, &sub_root,
3165 dentry);
3166 if (ret < 0)
3167 return ERR_PTR(ret);
3168 if (ret > 0)
3169 return ERR_PTR(-ENOENT);
3170 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
3171 if (IS_ERR(inode))
3172 return ERR_CAST(inode);
3174 return inode;
3177 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3178 struct nameidata *nd)
3180 struct inode *inode;
3182 if (dentry->d_name.len > BTRFS_NAME_LEN)
3183 return ERR_PTR(-ENAMETOOLONG);
3185 inode = btrfs_lookup_dentry(dir, dentry);
3186 if (IS_ERR(inode))
3187 return ERR_CAST(inode);
3189 return d_splice_alias(inode, dentry);
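/*
 * editor's note: btrfs_lookup_dentry() can legitimately return NULL when
 * the name is not found; d_splice_alias() accepts a NULL inode and turns
 * the dentry into a negative one, so no extra check is needed here.
 */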
3192 static unsigned char btrfs_filetype_table[] = {
3193 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3196 static int btrfs_real_readdir(struct file *filp, void *dirent,
3197 filldir_t filldir)
3199 struct inode *inode = filp->f_dentry->d_inode;
3200 struct btrfs_root *root = BTRFS_I(inode)->root;
3201 struct btrfs_item *item;
3202 struct btrfs_dir_item *di;
3203 struct btrfs_key key;
3204 struct btrfs_key found_key;
3205 struct btrfs_path *path;
3206 int ret;
3207 u32 nritems;
3208 struct extent_buffer *leaf;
3209 int slot;
3210 int advance;
3211 unsigned char d_type;
3212 int over = 0;
3213 u32 di_cur;
3214 u32 di_total;
3215 u32 di_len;
3216 int key_type = BTRFS_DIR_INDEX_KEY;
3217 char tmp_name[32];
3218 char *name_ptr;
3219 int name_len;
3221 /* FIXME, use a real flag for deciding about the key type */
3222 if (root->fs_info->tree_root == root)
3223 key_type = BTRFS_DIR_ITEM_KEY;
3225 /* special case for "." */
3226 if (filp->f_pos == 0) {
3227 over = filldir(dirent, ".", 1,
3228 1, inode->i_ino,
3229 DT_DIR);
3230 if (over)
3231 return 0;
3232 filp->f_pos = 1;
3234 /* special case for .., just use the back ref */
3235 if (filp->f_pos == 1) {
3236 u64 pino = parent_ino(filp->f_path.dentry);
3237 over = filldir(dirent, "..", 2,
3238 2, pino, DT_DIR);
3239 if (over)
3240 return 0;
3241 filp->f_pos = 2;
3243 path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
3244 path->reada = 2;
3246 btrfs_set_key_type(&key, key_type);
3247 key.offset = filp->f_pos;
3248 key.objectid = inode->i_ino;
3250 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3251 if (ret < 0)
3252 goto err;
3253 advance = 0;
3255 while (1) {
3256 leaf = path->nodes[0];
3257 nritems = btrfs_header_nritems(leaf);
3258 slot = path->slots[0];
3259 if (advance || slot >= nritems) {
3260 if (slot >= nritems - 1) {
3261 ret = btrfs_next_leaf(root, path);
3262 if (ret)
3263 break;
3264 leaf = path->nodes[0];
3265 nritems = btrfs_header_nritems(leaf);
3266 slot = path->slots[0];
3267 } else {
3268 slot++;
3269 path->slots[0]++;
3273 advance = 1;
3274 item = btrfs_item_nr(leaf, slot);
3275 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3277 if (found_key.objectid != key.objectid)
3278 break;
3279 if (btrfs_key_type(&found_key) != key_type)
3280 break;
3281 if (found_key.offset < filp->f_pos)
3282 continue;
3284 filp->f_pos = found_key.offset;
3286 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3287 di_cur = 0;
3288 di_total = btrfs_item_size(leaf, item);
3290 while (di_cur < di_total) {
3291 struct btrfs_key location;
3293 name_len = btrfs_dir_name_len(leaf, di);
3294 if (name_len <= sizeof(tmp_name)) {
3295 name_ptr = tmp_name;
3296 } else {
3297 name_ptr = kmalloc(name_len, GFP_NOFS);
3298 if (!name_ptr) {
3299 ret = -ENOMEM;
3300 goto err;
3303 read_extent_buffer(leaf, name_ptr,
3304 (unsigned long)(di + 1), name_len);
3306 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3307 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3309 /* is this a reference to our own snapshot? If so
3310 * skip it
3311 */
3312 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3313 location.objectid == root->root_key.objectid) {
3314 over = 0;
3315 goto skip;
3317 over = filldir(dirent, name_ptr, name_len,
3318 found_key.offset, location.objectid,
3319 d_type);
3321 skip:
3322 if (name_ptr != tmp_name)
3323 kfree(name_ptr);
3325 if (over)
3326 goto nopos;
3327 di_len = btrfs_dir_name_len(leaf, di) +
3328 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3329 di_cur += di_len;
3330 di = (struct btrfs_dir_item *)((char *)di + di_len);
3334 /* Reached end of directory/root. Bump pos past the last item. */
3335 if (key_type == BTRFS_DIR_INDEX_KEY)
3336 filp->f_pos = INT_LIMIT(off_t);
3337 else
3338 filp->f_pos++;
3339 nopos:
3340 ret = 0;
3341 err:
3342 btrfs_free_path(path);
3343 return ret;
3346 int btrfs_write_inode(struct inode *inode, int wait)
3348 struct btrfs_root *root = BTRFS_I(inode)->root;
3349 struct btrfs_trans_handle *trans;
3350 int ret = 0;
3352 if (root->fs_info->btree_inode == inode)
3353 return 0;
3355 if (wait) {
3356 trans = btrfs_join_transaction(root, 1);
3357 btrfs_set_trans_block_group(trans, inode);
3358 ret = btrfs_commit_transaction(trans, root);
3360 return ret;
3363 /*
3364 * This is somewhat expensive, updating the tree every time the
3365 * inode changes. But, it is most likely to find the inode in cache.
3366 * FIXME, needs more benchmarking; there are no reasons other than performance
3367 * to keep or drop this code.
3368 */
3369 void btrfs_dirty_inode(struct inode *inode)
3371 struct btrfs_root *root = BTRFS_I(inode)->root;
3372 struct btrfs_trans_handle *trans;
3374 trans = btrfs_join_transaction(root, 1);
3375 btrfs_set_trans_block_group(trans, inode);
3376 btrfs_update_inode(trans, root, inode);
3377 btrfs_end_transaction(trans, root);
3380 /*
3381 * find the highest existing sequence number in a directory
3382 * and then set the in-memory index_cnt variable to the next
3383 * free sequence number
3384 */
3385 static int btrfs_set_inode_index_count(struct inode *inode)
3387 struct btrfs_root *root = BTRFS_I(inode)->root;
3388 struct btrfs_key key, found_key;
3389 struct btrfs_path *path;
3390 struct extent_buffer *leaf;
3391 int ret;
3393 key.objectid = inode->i_ino;
3394 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3395 key.offset = (u64)-1;
3397 path = btrfs_alloc_path();
3398 if (!path)
3399 return -ENOMEM;
3401 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3402 if (ret < 0)
3403 goto out;
3404 /* FIXME: we should be able to handle this */
3405 if (ret == 0)
3406 goto out;
3407 ret = 0;
3409 /*
3410 * MAGIC NUMBER EXPLANATION:
3411 * we search a directory based on f_pos, and '.' and '..' have
3412 * f_pos of 0 and 1 respectively, so everybody else has to start
3413 * at 2
3414 */
3415 if (path->slots[0] == 0) {
3416 BTRFS_I(inode)->index_cnt = 2;
3417 goto out;
3420 path->slots[0]--;
3422 leaf = path->nodes[0];
3423 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3425 if (found_key.objectid != inode->i_ino ||
3426 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3427 BTRFS_I(inode)->index_cnt = 2;
3428 goto out;
3431 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3432 out:
3433 btrfs_free_path(path);
3434 return ret;
3437 /*
3438 * helper to find a free sequence number in a given directory. The current
3439 * code is very simple; later versions will do smarter things in the btree
3440 */
3441 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3443 int ret = 0;
3445 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3446 ret = btrfs_set_inode_index_count(dir);
3447 if (ret)
3448 return ret;
3451 *index = BTRFS_I(dir)->index_cnt;
3452 BTRFS_I(dir)->index_cnt++;
3454 return ret;
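/*
 * editor's note: btrfs_new_inode() and btrfs_link() call this; each new
 * directory entry takes the current index_cnt as its DIR_INDEX offset,
 * and the counter only moves forward.
 */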
3457 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3458 struct btrfs_root *root,
3459 struct inode *dir,
3460 const char *name, int name_len,
3461 u64 ref_objectid, u64 objectid,
3462 u64 alloc_hint, int mode, u64 *index)
3464 struct inode *inode;
3465 struct btrfs_inode_item *inode_item;
3466 struct btrfs_key *location;
3467 struct btrfs_path *path;
3468 struct btrfs_inode_ref *ref;
3469 struct btrfs_key key[2];
3470 u32 sizes[2];
3471 unsigned long ptr;
3472 int ret;
3473 int owner;
3475 path = btrfs_alloc_path();
3476 BUG_ON(!path);
3478 inode = new_inode(root->fs_info->sb);
3479 if (!inode) {
btrfs_free_path(path); /* don't leak the path on early return */
3480 return ERR_PTR(-ENOMEM);
}
3482 if (dir) {
3483 ret = btrfs_set_inode_index(dir, index);
3484 if (ret) {
btrfs_free_path(path); /* don't leak the path on early return */
3485 iput(inode);
3486 return ERR_PTR(ret);
}
3489 /*
3490 * index_cnt is ignored for everything but a dir,
3491 * btrfs_set_inode_index_count has an explanation for the magic
3492 * number
3493 */
3494 init_btrfs_i(inode);
3495 BTRFS_I(inode)->index_cnt = 2;
3496 BTRFS_I(inode)->root = root;
3497 BTRFS_I(inode)->generation = trans->transid;
3498 btrfs_set_inode_space_info(root, inode);
3500 if (mode & S_IFDIR)
3501 owner = 0;
3502 else
3503 owner = 1;
3504 BTRFS_I(inode)->block_group =
3505 btrfs_find_block_group(root, 0, alloc_hint, owner);
3506 if ((mode & S_IFREG)) {
3507 if (btrfs_test_opt(root, NODATASUM))
3508 btrfs_set_flag(inode, NODATASUM);
3509 if (btrfs_test_opt(root, NODATACOW))
3510 btrfs_set_flag(inode, NODATACOW);
3513 key[0].objectid = objectid;
3514 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3515 key[0].offset = 0;
3517 key[1].objectid = objectid;
3518 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3519 key[1].offset = ref_objectid;
3521 sizes[0] = sizeof(struct btrfs_inode_item);
3522 sizes[1] = name_len + sizeof(*ref);
3524 path->leave_spinning = 1;
3525 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3526 if (ret != 0)
3527 goto fail;
3529 if (objectid > root->highest_inode)
3530 root->highest_inode = objectid;
3532 inode->i_uid = current_fsuid();
3534 if (dir && (dir->i_mode & S_ISGID)) {
3535 inode->i_gid = dir->i_gid;
3536 if (S_ISDIR(mode))
3537 mode |= S_ISGID;
3538 } else
3539 inode->i_gid = current_fsgid();
3541 inode->i_mode = mode;
3542 inode->i_ino = objectid;
3543 inode_set_bytes(inode, 0);
3544 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3545 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3546 struct btrfs_inode_item);
3547 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3549 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3550 struct btrfs_inode_ref);
3551 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3552 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3553 ptr = (unsigned long)(ref + 1);
3554 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3556 btrfs_mark_buffer_dirty(path->nodes[0]);
3557 btrfs_free_path(path);
3559 location = &BTRFS_I(inode)->location;
3560 location->objectid = objectid;
3561 location->offset = 0;
3562 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3564 insert_inode_hash(inode);
3565 return inode;
3566 fail:
3567 if (dir)
3568 BTRFS_I(dir)->index_cnt--;
3569 btrfs_free_path(path);
3570 iput(inode);
3571 return ERR_PTR(ret);
3574 static inline u8 btrfs_inode_type(struct inode *inode)
3576 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3579 /*
3580 * utility function to add 'inode' into 'parent_inode' with
3581 * a given name and a given sequence number.
3582 * if 'add_backref' is true, also insert a backref from the
3583 * inode to the parent directory.
3584 */
3585 int btrfs_add_link(struct btrfs_trans_handle *trans,
3586 struct inode *parent_inode, struct inode *inode,
3587 const char *name, int name_len, int add_backref, u64 index)
3589 int ret;
3590 struct btrfs_key key;
3591 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3593 key.objectid = inode->i_ino;
3594 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3595 key.offset = 0;
3597 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3598 parent_inode->i_ino,
3599 &key, btrfs_inode_type(inode),
3600 index);
3601 if (ret == 0) {
3602 if (add_backref) {
3603 ret = btrfs_insert_inode_ref(trans, root,
3604 name, name_len,
3605 inode->i_ino,
3606 parent_inode->i_ino,
3607 index);
3609 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3610 name_len * 2);
3611 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3612 ret = btrfs_update_inode(trans, root, parent_inode);
3614 return ret;
3617 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3618 struct dentry *dentry, struct inode *inode,
3619 int backref, u64 index)
3621 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3622 inode, dentry->d_name.name,
3623 dentry->d_name.len, backref, index);
3624 if (!err) {
3625 d_instantiate(dentry, inode);
3626 return 0;
3628 if (err > 0)
3629 err = -EEXIST;
3630 return err;
3633 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3634 int mode, dev_t rdev)
3636 struct btrfs_trans_handle *trans;
3637 struct btrfs_root *root = BTRFS_I(dir)->root;
3638 struct inode *inode = NULL;
3639 int err;
3640 int drop_inode = 0;
3641 u64 objectid;
3642 unsigned long nr = 0;
3643 u64 index = 0;
3645 if (!new_valid_dev(rdev))
3646 return -EINVAL;
3648 err = btrfs_check_metadata_free_space(root);
3649 if (err)
3650 goto fail;
3652 trans = btrfs_start_transaction(root, 1);
3653 btrfs_set_trans_block_group(trans, dir);
3655 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3656 if (err) {
3657 err = -ENOSPC;
3658 goto out_unlock;
3661 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3662 dentry->d_name.len,
3663 dentry->d_parent->d_inode->i_ino, objectid,
3664 BTRFS_I(dir)->block_group, mode, &index);
3665 err = PTR_ERR(inode);
3666 if (IS_ERR(inode))
3667 goto out_unlock;
3669 err = btrfs_init_inode_security(inode, dir);
3670 if (err) {
3671 drop_inode = 1;
3672 goto out_unlock;
3675 btrfs_set_trans_block_group(trans, inode);
3676 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3677 if (err)
3678 drop_inode = 1;
3679 else {
3680 inode->i_op = &btrfs_special_inode_operations;
3681 init_special_inode(inode, inode->i_mode, rdev);
3682 btrfs_update_inode(trans, root, inode);
3684 dir->i_sb->s_dirt = 1;
3685 btrfs_update_inode_block_group(trans, inode);
3686 btrfs_update_inode_block_group(trans, dir);
3687 out_unlock:
3688 nr = trans->blocks_used;
3689 btrfs_end_transaction_throttle(trans, root);
3690 fail:
3691 if (drop_inode) {
3692 inode_dec_link_count(inode);
3693 iput(inode);
3695 btrfs_btree_balance_dirty(root, nr);
3696 return err;
3699 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3700 int mode, struct nameidata *nd)
3702 struct btrfs_trans_handle *trans;
3703 struct btrfs_root *root = BTRFS_I(dir)->root;
3704 struct inode *inode = NULL;
3705 int err;
3706 int drop_inode = 0;
3707 unsigned long nr = 0;
3708 u64 objectid;
3709 u64 index = 0;
3711 err = btrfs_check_metadata_free_space(root);
3712 if (err)
3713 goto fail;
3714 trans = btrfs_start_transaction(root, 1);
3715 btrfs_set_trans_block_group(trans, dir);
3717 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3718 if (err) {
3719 err = -ENOSPC;
3720 goto out_unlock;
3723 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3724 dentry->d_name.len,
3725 dentry->d_parent->d_inode->i_ino,
3726 objectid, BTRFS_I(dir)->block_group, mode,
3727 &index);
3728 err = PTR_ERR(inode);
3729 if (IS_ERR(inode))
3730 goto out_unlock;
3732 err = btrfs_init_inode_security(inode, dir);
3733 if (err) {
3734 drop_inode = 1;
3735 goto out_unlock;
3738 btrfs_set_trans_block_group(trans, inode);
3739 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3740 if (err)
3741 drop_inode = 1;
3742 else {
3743 inode->i_mapping->a_ops = &btrfs_aops;
3744 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3745 inode->i_fop = &btrfs_file_operations;
3746 inode->i_op = &btrfs_file_inode_operations;
3747 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3749 dir->i_sb->s_dirt = 1;
3750 btrfs_update_inode_block_group(trans, inode);
3751 btrfs_update_inode_block_group(trans, dir);
3752 out_unlock:
3753 nr = trans->blocks_used;
3754 btrfs_end_transaction_throttle(trans, root);
3755 fail:
3756 if (drop_inode) {
3757 inode_dec_link_count(inode);
3758 iput(inode);
3760 btrfs_btree_balance_dirty(root, nr);
3761 return err;
3764 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3765 struct dentry *dentry)
3767 struct btrfs_trans_handle *trans;
3768 struct btrfs_root *root = BTRFS_I(dir)->root;
3769 struct inode *inode = old_dentry->d_inode;
3770 u64 index;
3771 unsigned long nr = 0;
3772 int err;
3773 int drop_inode = 0;
3775 if (inode->i_nlink == 0)
3776 return -ENOENT;
3778 btrfs_inc_nlink(inode);
3779 err = btrfs_check_metadata_free_space(root);
3780 if (err)
3781 goto fail;
3782 err = btrfs_set_inode_index(dir, &index);
3783 if (err)
3784 goto fail;
3786 trans = btrfs_start_transaction(root, 1);
3788 btrfs_set_trans_block_group(trans, dir);
3789 atomic_inc(&inode->i_count);
3791 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3793 if (err)
3794 drop_inode = 1;
3796 dir->i_sb->s_dirt = 1;
3797 btrfs_update_inode_block_group(trans, dir);
3798 err = btrfs_update_inode(trans, root, inode);
3800 if (err)
3801 drop_inode = 1;
3803 nr = trans->blocks_used;
3805 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
3806 btrfs_end_transaction_throttle(trans, root);
3807 fail:
3808 if (drop_inode) {
3809 inode_dec_link_count(inode);
3810 iput(inode);
3812 btrfs_btree_balance_dirty(root, nr);
3813 return err;
3816 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3818 struct inode *inode = NULL;
3819 struct btrfs_trans_handle *trans;
3820 struct btrfs_root *root = BTRFS_I(dir)->root;
3821 int err = 0;
3822 int drop_on_err = 0;
3823 u64 objectid = 0;
3824 u64 index = 0;
3825 unsigned long nr = 1;
3827 err = btrfs_check_metadata_free_space(root);
3828 if (err)
3829 goto out_unlock;
3831 trans = btrfs_start_transaction(root, 1);
3832 if (IS_ERR(trans))
3833 return PTR_ERR(trans);
3834 btrfs_set_trans_block_group(trans, dir);
3839 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3840 if (err) {
3841 err = -ENOSPC;
3842 goto out_unlock;
3845 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3846 dentry->d_name.len,
3847 dentry->d_parent->d_inode->i_ino, objectid,
3848 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3849 &index);
3850 if (IS_ERR(inode)) {
3851 err = PTR_ERR(inode);
3852 goto out_fail;
3855 drop_on_err = 1;
3857 err = btrfs_init_inode_security(inode, dir);
3858 if (err)
3859 goto out_fail;
3861 inode->i_op = &btrfs_dir_inode_operations;
3862 inode->i_fop = &btrfs_dir_file_operations;
3863 btrfs_set_trans_block_group(trans, inode);
3865 btrfs_i_size_write(inode, 0);
3866 err = btrfs_update_inode(trans, root, inode);
3867 if (err)
3868 goto out_fail;
3870 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3871 inode, dentry->d_name.name,
3872 dentry->d_name.len, 0, index);
3873 if (err)
3874 goto out_fail;
3876 d_instantiate(dentry, inode);
3877 drop_on_err = 0;
3878 dir->i_sb->s_dirt = 1;
3879 btrfs_update_inode_block_group(trans, inode);
3880 btrfs_update_inode_block_group(trans, dir);
3882 out_fail:
3883 nr = trans->blocks_used;
3884 btrfs_end_transaction_throttle(trans, root);
3886 out_unlock:
3887 if (drop_on_err)
3888 iput(inode);
3889 btrfs_btree_balance_dirty(root, nr);
3890 return err;
3893 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3894 * and an extent that you want to insert, deal with overlap and insert
3895 * the new extent into the tree.
3896 */
3897 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3898 struct extent_map *existing,
3899 struct extent_map *em,
3900 u64 map_start, u64 map_len)
3902 u64 start_diff;
3904 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3905 start_diff = map_start - em->start;
3906 em->start = map_start;
3907 em->len = map_len;
3908 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3909 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3910 em->block_start += start_diff;
3911 em->block_len -= start_diff;
3913 return add_extent_mapping(em_tree, em);
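/*
 * editor's worked example (made-up numbers): if em spans [0, 16K) at
 * disk block_start 100K and the uncovered gap is map_start = 4K with
 * map_len = 4K, then start_diff = 4K, so em becomes [4K, 8K) with
 * block_start 104K; only the range the tree is missing gets inserted.
 */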
3916 static noinline int uncompress_inline(struct btrfs_path *path,
3917 struct inode *inode, struct page *page,
3918 size_t pg_offset, u64 extent_offset,
3919 struct btrfs_file_extent_item *item)
3921 int ret;
3922 struct extent_buffer *leaf = path->nodes[0];
3923 char *tmp;
3924 size_t max_size;
3925 unsigned long inline_size;
3926 unsigned long ptr;
3928 WARN_ON(pg_offset != 0);
3929 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3930 inline_size = btrfs_file_extent_inline_item_len(leaf,
3931 btrfs_item_nr(leaf, path->slots[0]));
3932 tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM; /* don't read into a NULL buffer below */
3933 ptr = btrfs_file_extent_inline_start(item);
3935 read_extent_buffer(leaf, tmp, ptr, inline_size);
3937 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
3938 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3939 inline_size, max_size);
3940 if (ret) {
3941 char *kaddr = kmap_atomic(page, KM_USER0);
3942 unsigned long copy_size = min_t(u64,
3943 PAGE_CACHE_SIZE - pg_offset,
3944 max_size - extent_offset);
3945 memset(kaddr + pg_offset, 0, copy_size);
3946 kunmap_atomic(kaddr, KM_USER0);
3948 kfree(tmp);
3949 return 0;
3952 /*
3953 * a bit scary, this does extent mapping from logical file offset to the disk.
3954 * the ugly parts come from merging extents from the disk with the in-ram
3955 * representation. This gets more complex because of the data=ordered code,
3956 * where the in-ram extents might be locked pending data=ordered completion.
3957 *
3958 * This also copies inline extents directly into the page.
3959 */
3961 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3962 size_t pg_offset, u64 start, u64 len,
3963 int create)
3965 int ret;
3966 int err = 0;
3967 u64 bytenr;
3968 u64 extent_start = 0;
3969 u64 extent_end = 0;
3970 u64 objectid = inode->i_ino;
3971 u32 found_type;
3972 struct btrfs_path *path = NULL;
3973 struct btrfs_root *root = BTRFS_I(inode)->root;
3974 struct btrfs_file_extent_item *item;
3975 struct extent_buffer *leaf;
3976 struct btrfs_key found_key;
3977 struct extent_map *em = NULL;
3978 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3979 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3980 struct btrfs_trans_handle *trans = NULL;
3981 int compressed;
3983 again:
3984 spin_lock(&em_tree->lock);
3985 em = lookup_extent_mapping(em_tree, start, len);
3986 if (em)
3987 em->bdev = root->fs_info->fs_devices->latest_bdev;
3988 spin_unlock(&em_tree->lock);
3990 if (em) {
3991 if (em->start > start || em->start + em->len <= start)
3992 free_extent_map(em);
3993 else if (em->block_start == EXTENT_MAP_INLINE && page)
3994 free_extent_map(em);
3995 else
3996 goto out;
3998 em = alloc_extent_map(GFP_NOFS);
3999 if (!em) {
4000 err = -ENOMEM;
4001 goto out;
4003 em->bdev = root->fs_info->fs_devices->latest_bdev;
4004 em->start = EXTENT_MAP_HOLE;
4005 em->orig_start = EXTENT_MAP_HOLE;
4006 em->len = (u64)-1;
4007 em->block_len = (u64)-1;
4009 if (!path) {
4010 path = btrfs_alloc_path();
4011 BUG_ON(!path);
4014 ret = btrfs_lookup_file_extent(trans, root, path,
4015 objectid, start, trans != NULL);
4016 if (ret < 0) {
4017 err = ret;
4018 goto out;
4021 if (ret != 0) {
4022 if (path->slots[0] == 0)
4023 goto not_found;
4024 path->slots[0]--;
4027 leaf = path->nodes[0];
4028 item = btrfs_item_ptr(leaf, path->slots[0],
4029 struct btrfs_file_extent_item);
4030 /* are we inside the extent that was found? */
4031 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4032 found_type = btrfs_key_type(&found_key);
4033 if (found_key.objectid != objectid ||
4034 found_type != BTRFS_EXTENT_DATA_KEY) {
4035 goto not_found;
4038 found_type = btrfs_file_extent_type(leaf, item);
4039 extent_start = found_key.offset;
4040 compressed = btrfs_file_extent_compression(leaf, item);
4041 if (found_type == BTRFS_FILE_EXTENT_REG ||
4042 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4043 extent_end = extent_start +
4044 btrfs_file_extent_num_bytes(leaf, item);
4045 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4046 size_t size;
4047 size = btrfs_file_extent_inline_len(leaf, item);
4048 extent_end = (extent_start + size + root->sectorsize - 1) &
4049 ~((u64)root->sectorsize - 1);
4052 if (start >= extent_end) {
4053 path->slots[0]++;
4054 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4055 ret = btrfs_next_leaf(root, path);
4056 if (ret < 0) {
4057 err = ret;
4058 goto out;
4060 if (ret > 0)
4061 goto not_found;
4062 leaf = path->nodes[0];
4064 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4065 if (found_key.objectid != objectid ||
4066 found_key.type != BTRFS_EXTENT_DATA_KEY)
4067 goto not_found;
4068 if (start + len <= found_key.offset)
4069 goto not_found;
4070 em->start = start;
4071 em->len = found_key.offset - start;
4072 goto not_found_em;
4075 if (found_type == BTRFS_FILE_EXTENT_REG ||
4076 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4077 em->start = extent_start;
4078 em->len = extent_end - extent_start;
4079 em->orig_start = extent_start -
4080 btrfs_file_extent_offset(leaf, item);
4081 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4082 if (bytenr == 0) {
4083 em->block_start = EXTENT_MAP_HOLE;
4084 goto insert;
4086 if (compressed) {
4087 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4088 em->block_start = bytenr;
4089 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4090 item);
4091 } else {
4092 bytenr += btrfs_file_extent_offset(leaf, item);
4093 em->block_start = bytenr;
4094 em->block_len = em->len;
4095 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4096 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4098 goto insert;
4099 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4100 unsigned long ptr;
4101 char *map;
4102 size_t size;
4103 size_t extent_offset;
4104 size_t copy_size;
4106 em->block_start = EXTENT_MAP_INLINE;
4107 if (!page || create) {
4108 em->start = extent_start;
4109 em->len = extent_end - extent_start;
4110 goto out;
4113 size = btrfs_file_extent_inline_len(leaf, item);
4114 extent_offset = page_offset(page) + pg_offset - extent_start;
4115 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4116 size - extent_offset);
4117 em->start = extent_start + extent_offset;
4118 em->len = (copy_size + root->sectorsize - 1) &
4119 ~((u64)root->sectorsize - 1);
4120 em->orig_start = EXTENT_MAP_INLINE;
4121 if (compressed)
4122 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4123 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4124 if (create == 0 && !PageUptodate(page)) {
4125 if (btrfs_file_extent_compression(leaf, item) ==
4126 BTRFS_COMPRESS_ZLIB) {
4127 ret = uncompress_inline(path, inode, page,
4128 pg_offset,
4129 extent_offset, item);
4130 BUG_ON(ret);
4131 } else {
4132 map = kmap(page);
4133 read_extent_buffer(leaf, map + pg_offset, ptr,
4134 copy_size);
4135 kunmap(page);
4137 flush_dcache_page(page);
4138 } else if (create && PageUptodate(page)) {
4139 if (!trans) {
4140 kunmap(page);
4141 free_extent_map(em);
4142 em = NULL;
4143 btrfs_release_path(root, path);
4144 trans = btrfs_join_transaction(root, 1);
4145 goto again;
4147 map = kmap(page);
4148 write_extent_buffer(leaf, map + pg_offset, ptr,
4149 copy_size);
4150 kunmap(page);
4151 btrfs_mark_buffer_dirty(leaf);
4153 set_extent_uptodate(io_tree, em->start,
4154 extent_map_end(em) - 1, GFP_NOFS);
4155 goto insert;
4156 } else {
4157 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4158 WARN_ON(1);
4160 not_found:
4161 em->start = start;
4162 em->len = len;
4163 not_found_em:
4164 em->block_start = EXTENT_MAP_HOLE;
4165 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4166 insert:
4167 btrfs_release_path(root, path);
4168 if (em->start > start || extent_map_end(em) <= start) {
4169 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4170 "[%llu %llu]\n", (unsigned long long)em->start,
4171 (unsigned long long)em->len,
4172 (unsigned long long)start,
4173 (unsigned long long)len);
4174 err = -EIO;
4175 goto out;
4178 err = 0;
4179 spin_lock(&em_tree->lock);
4180 ret = add_extent_mapping(em_tree, em);
4181 /* it is possible that someone inserted the extent into the tree
4182 * while we had the lock dropped. It is also possible that
4183 * an overlapping map exists in the tree
4185 if (ret == -EEXIST) {
4186 struct extent_map *existing;
4188 ret = 0;
4190 existing = lookup_extent_mapping(em_tree, start, len);
4191 if (existing && (existing->start > start ||
4192 existing->start + existing->len <= start)) {
4193 free_extent_map(existing);
4194 existing = NULL;
4196 if (!existing) {
4197 existing = lookup_extent_mapping(em_tree, em->start,
4198 em->len);
4199 if (existing) {
4200 err = merge_extent_mapping(em_tree, existing,
4201 em, start,
4202 root->sectorsize);
4203 free_extent_map(existing);
4204 if (err) {
4205 free_extent_map(em);
4206 em = NULL;
4208 } else {
4209 err = -EIO;
4210 free_extent_map(em);
4211 em = NULL;
4213 } else {
4214 free_extent_map(em);
4215 em = existing;
4216 err = 0;
4219 spin_unlock(&em_tree->lock);
4220 out:
4221 if (path)
4222 btrfs_free_path(path);
4223 if (trans) {
4224 ret = btrfs_end_transaction(trans, root);
4225 if (!err)
4226 err = ret;
4228 if (err) {
4229 free_extent_map(em);
4230 WARN_ON(1);
4231 return ERR_PTR(err);
4233 return em;
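/*
 * O_DIRECT is not implemented here yet, so refuse it outright instead
 * of pretending to support it; all IO goes through the page cache.
 */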
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	return -EINVAL;
}

static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
}

int btrfs_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btrfs_get_extent);
}

static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}

int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}

static int
btrfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}

static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
}
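/*
 * called when part or all of a page is being dropped from the page
 * cache.  Any ordered extent covering the page has to be completed and
 * accounted for here, because the IO it was waiting on will never be
 * started.
 */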
static void btrfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

	wait_on_page_writeback(page);
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
					      page_offset(page));
	if (ordered) {
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_LOCKED, 1, 0, GFP_NOFS);
		btrfs_finish_ordered_io(page->mapping->host,
					page_start, page_end);
		btrfs_put_ordered_extent(ordered);
		lock_extent(tree, page_start, page_end, GFP_NOFS);
	}
	clear_extent_bit(tree, page_start, page_end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_ORDERED,
			 1, 1, GFP_NOFS);
	__btrfs_releasepage(page, GFP_NOFS);

	ClearPageChecked(page);
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = fdentry(vma->vm_file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	char *kaddr;
	unsigned long zero_start;
	loff_t size;
	int ret;
	u64 page_start;
	u64 page_end;

	ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
	if (ret) {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
		goto out;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	lock_page(page);
	size = i_size_read(inode);
	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
	set_page_extent_mapped(page);

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);

	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);

out_unlock:
	unlock_page(page);
out:
	return ret;
}
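/*
 * drop the items and pages for this inode past inode->i_size.  The
 * inode goes on the orphan list first so an interrupted truncate can
 * be finished after a crash.
 */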
static void btrfs_truncate(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 mask = root->sectorsize - 1;

	if (!S_ISREG(inode->i_mode))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	btrfs_truncate_page(inode->i_mapping, inode->i_size);
	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);

	trans = btrfs_start_transaction(root, 1);

	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release.  That
	 * could happen well after the next commit, leaving a great big
	 * window where new writes may get lost if someone chooses to write
	 * to this file after truncating to zero
	 *
	 * The inode doesn't have any dirty data here, and so if we commit
	 * this is a noop.  If someone immediately starts writing to the inode
	 * it is very likely we'll catch some of their writes in this
	 * transaction, and the commit will find this file on the ordered
	 * data list with good things to send down.
	 *
	 * This is a best effort solution, there is still a window where
	 * using truncate to replace the contents of the file will
	 * end up with a zero length file after a crash.
	 */
	if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
		btrfs_add_ordered_operation(trans, root, inode);

	btrfs_set_trans_block_group(trans, inode);
	btrfs_i_size_write(inode, inode->i_size);

	ret = btrfs_orphan_add(trans, inode);
	if (ret)
		goto out;
	/* FIXME, add redo link to tree so we don't leak on crash */
	ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
					 BTRFS_EXTENT_DATA_KEY);
	btrfs_update_inode(trans, root, inode);

	ret = btrfs_orphan_del(trans, inode);
	BUG_ON(ret);

out:
	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
	BUG_ON(ret);
	btrfs_btree_balance_dirty(root, nr);
}
/*
 * create a new subvolume directory/inode (helper for the ioctl).
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
		struct btrfs_root *new_root, struct dentry *dentry,
		u64 new_dirid, u64 alloc_hint)
{
	struct inode *inode;
	int error;
	u64 index = 0;

	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	inode->i_nlink = 1;
	btrfs_i_size_write(inode, 0);

	error = btrfs_update_inode(trans, new_root, inode);
	if (error)
		return error;

	d_instantiate(dentry, inode);
	return 0;
}
/* helper function for file defrag and space balancing.  This
 * forces readahead on a given range of bytes in an inode
 */
unsigned long btrfs_force_ra(struct address_space *mapping,
			     struct file_ra_state *ra, struct file *file,
			     pgoff_t offset, pgoff_t last_index)
{
	pgoff_t req_size = last_index - offset + 1;

	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
	return offset + req_size;
}
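/*
 * allocate a new in-memory btrfs inode and initialize the per-inode
 * state the slab constructor doesn't cover
 */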
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;
	ei->last_trans = 0;
	ei->logged_trans = 0;
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	ei->i_acl = BTRFS_ACL_NOT_CACHED;
	ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
	INIT_LIST_HEAD(&ei->i_orphan);
	INIT_LIST_HEAD(&ei->ordered_operations);
	return &ei->vfs_inode;
}
void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);

	if (BTRFS_I(inode)->i_acl &&
	    BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
		posix_acl_release(BTRFS_I(inode)->i_acl);
	if (BTRFS_I(inode)->i_default_acl &&
	    BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
		posix_acl_release(BTRFS_I(inode)->i_default_acl);

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	spin_lock(&root->list_lock);
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		printk(KERN_ERR "BTRFS: inode %lu still on the orphan"
		       " list\n", inode->i_ino);
		dump_stack();
	}
	spin_unlock(&root->list_lock);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			printk(KERN_ERR "btrfs found ordered "
			       "extent %llu %llu on inode cleanup\n",
			       (unsigned long long)ordered->file_offset,
			       (unsigned long long)ordered->len);
			btrfs_remove_ordered_extent(inode, ordered);
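			/*
			 * two puts: one drops the reference taken by the
			 * lookup above, the other drops the reference the
			 * ordered tree was still holding for this extent
			 */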
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}
void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_bit_radix_cachep)
		kmem_cache_destroy(btrfs_bit_radix_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
}
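/*
 * thin wrapper around kmem_cache_create() that adds the slab flags
 * every btrfs cache uses
 */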
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
				 SLAB_MEM_SPREAD | extra_flags), ctor);
}
int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
					  sizeof(struct btrfs_inode),
					  0, init_once);
	if (!btrfs_inode_cachep)
		goto fail;
	btrfs_trans_handle_cachep =
			btrfs_cache_create("btrfs_trans_handle_cache",
					   sizeof(struct btrfs_trans_handle),
					   0, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;
	btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
					     sizeof(struct btrfs_transaction),
					     0, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;
	btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
					 sizeof(struct btrfs_path),
					 0, NULL);
	if (!btrfs_path_cachep)
		goto fail;
	btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
					      SLAB_DESTROY_BY_RCU, NULL);
	if (!btrfs_bit_radix_cachep)
		goto fail;
	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (inode_get_bytes(inode) +
			BTRFS_I(inode)->delalloc_bytes) >> 9;
	return 0;
}
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	int ret;

	/* we're not allowed to rename between subvolumes */
	if (BTRFS_I(old_inode)->root->root_key.objectid !=
	    BTRFS_I(new_dir)->root->root_key.objectid)
		return -EXDEV;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
		return -ENOTEMPTY;
	}

	/* to rename a snapshot or subvolume, we need to juggle the
	 * backrefs.  This isn't coded yet
	 */
	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
		return -EXDEV;

	ret = btrfs_check_metadata_free_space(root);
	if (ret)
		goto out_unlock;

	/*
	 * we're using rename to replace one file with another.
	 * and the replacement file is large.  Start IO on it now so
	 * we don't add too much work to the end of the transaction
	 */
	if (new_inode && old_inode && S_ISREG(old_inode->i_mode) &&
	    new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	trans = btrfs_start_transaction(root, 1);

	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size &&
	    old_inode && S_ISREG(old_inode->i_mode)) {
		btrfs_add_ordered_operation(trans, root, old_inode);
	}

	/*
	 * this is an ugly little race, but the rename is required to make
	 * sure that if we crash, the inode is either at the old name
	 * or the new one.  pinning the log transaction lets us make sure
	 * we don't allow a log commit to come in after we unlink the
	 * name but before we add the new name back in.
	 */
	btrfs_pin_log_trans(root);

	btrfs_set_trans_block_group(trans, new_dir);

	btrfs_inc_nlink(old_dentry->d_inode);
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
				 old_dentry->d_name.name,
				 old_dentry->d_name.len);
	if (ret)
		goto out_fail;

	if (new_inode) {
		new_inode->i_ctime = CURRENT_TIME;
		ret = btrfs_unlink_inode(trans, root, new_dir,
					 new_dentry->d_inode,
					 new_dentry->d_name.name,
					 new_dentry->d_name.len);
		if (ret)
			goto out_fail;
		if (new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			if (ret)
				goto out_fail;
		}
	}
	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
			     old_inode, new_dentry->d_name.name,
			     new_dentry->d_name.len, 1, index);
	if (ret)
		goto out_fail;

	btrfs_log_new_name(trans, old_inode, old_dir,
			   new_dentry->d_parent);
out_fail:

	/* this btrfs_end_log_trans just allows the current
	 * log-sub transaction to complete
	 */
	btrfs_end_log_trans(root);
	btrfs_end_transaction_throttle(trans, root);
out_unlock:
	return ret;
}
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	       atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	err = btrfs_check_metadata_free_space(root);
	if (err)
		goto out_fail;

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
	if (err) {
		err = -ENOSPC;
		goto out_unlock;
	}

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len,
				dentry->d_parent->d_inode->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
				&index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	dir->i_sb->s_dirt = 1;
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = inode->i_ino;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
out_fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
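/*
 * reserve and insert PREALLOC file extents for [start, end), splitting
 * the allocation into max_extent sized chunks.  i_size is only pushed
 * out when FALLOC_FL_KEEP_SIZE is not set.
 */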
static int prealloc_file_range(struct btrfs_trans_handle *trans,
			       struct inode *inode, u64 start, u64 end,
			       u64 alloc_hint, int mode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 alloc_size;
	u64 cur_offset = start;
	u64 num_bytes = end - start;
	int ret = 0;

	while (num_bytes > 0) {
		alloc_size = min(num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		if (ret) {
			WARN_ON(1);
			goto out;
		}
		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		alloc_hint = ins.objectid + ins.offset;
	}
out:
	if (cur_offset > start) {
		inode->i_ctime = CURRENT_TIME;
		btrfs_set_flag(inode, PREALLOC);
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    cur_offset > i_size_read(inode))
			btrfs_i_size_write(inode, cur_offset);
		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);
	}

	return ret;
}
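/*
 * fallocate walks the extent maps for the requested range and
 * preallocates any holes it finds.  Ordered IO is flushed and the
 * extent range is locked first so the allocation can't race with
 * writeback.
 */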
static long btrfs_fallocate(struct inode *inode, int mode,
			    loff_t offset, loff_t len)
{
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	struct btrfs_trans_handle *trans;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, alloc_start);
		if (ret)
			goto out;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;

		trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
		if (!trans) {
			ret = -EIO;
			goto out;
		}

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
			    alloc_end - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      alloc_start, alloc_end - 1, GFP_NOFS);
			btrfs_end_transaction(trans, BTRFS_I(inode)->root);

			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE) {
			ret = prealloc_file_range(trans, inode, cur_offset,
						  last_byte, alloc_hint, mode);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
			alloc_hint = em->block_start;
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
		      GFP_NOFS);

	btrfs_end_transaction(trans, BTRFS_I(inode)->root);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask)
{
	if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, btrfs_check_acl);
}
static struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};
static struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};
static struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};
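/*
 * hooks the generic extent_io code uses to call back into btrfs for
 * delalloc, bio submission and page writeback accounting
 */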
static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
};

static struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static struct inode_operations btrfs_file_inode_operations = {
	.truncate	= btrfs_truncate,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fallocate	= btrfs_fallocate,
	.fiemap		= btrfs_fiemap,
};
static struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};
static struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};