/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
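/*
 * Read and write support for zlib-compressed extents.  While the IO is
 * in flight, all the compressed pages belonging to one on-disk extent
 * are tracked by a single compressed_bio, no matter how many bios the
 * block layer actually needs to move them.
 */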
struct compressed_bio {
        /* number of bios pending for this compressed extent */
        atomic_t pending_bios;

        /* the pages with the compressed data on them */
        struct page **compressed_pages;

        /* inode that owns this data */
        struct inode *inode;

        /* starting offset in the inode for our pages */
        u64 start;

        /* number of bytes in the inode we're working on */
        unsigned long len;

        /* number of bytes on disk */
        unsigned long compressed_len;

        /* number of compressed pages in the array */
        unsigned long nr_pages;

        /* IO errors */
        int errors;
        int mirror_num;

        /* for reads, this is the bio we are copying the data into */
        struct bio *orig_bio;

        /*
         * the start of a variable length array of checksums only
         * used by reads
         */
        u32 sums;
};
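/*
 * The checksum array that trails the struct is sized at one entry per
 * disk sector of compressed data.  For example, with the default
 * crc32c checksums (csum_size == 4) a 128K compressed extent on a
 * 4K-sector filesystem reserves 32 u32 slots after the struct itself.
 */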
static inline int compressed_bio_size(struct btrfs_root *root,
                                      unsigned long disk_size)
{
        u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);

        return sizeof(struct compressed_bio) +
                ((disk_size + root->sectorsize - 1) / root->sectorsize) *
                csum_size;
}
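/*
 * bio_alloc can fail under memory pressure.  When the caller is itself
 * part of memory reclaim (PF_MEMALLOC), keep halving the vector count
 * and retrying rather than giving up outright, since even a small bio
 * lets the IO make forward progress.
 */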
static struct bio *compressed_bio_alloc(struct block_device *bdev,
                                        u64 first_byte, gfp_t gfp_flags)
{
        int nr_vecs;
        struct bio *bio;

        nr_vecs = bio_get_nr_vecs(bdev);
        bio = bio_alloc(gfp_flags, nr_vecs);

        if (bio == NULL && (current->flags & PF_MEMALLOC)) {
                while (!bio && (nr_vecs /= 2))
                        bio = bio_alloc(gfp_flags, nr_vecs);
        }

        if (bio) {
                bio->bi_size = 0;
                bio->bi_bdev = bdev;
                bio->bi_sector = first_byte >> 9;
        }
        return bio;
}
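/*
 * Verify one checksum for each page of compressed data against the
 * values stashed in cb->sums at submit time.  A single mismatch fails
 * the entire compressed extent, since it cannot be decompressed safely.
 */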
static int check_compressed_csum(struct inode *inode,
                                 struct compressed_bio *cb,
                                 u64 disk_start)
{
        int ret;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page *page;
        unsigned long i;
        char *kaddr;
        u32 csum;
        u32 *cb_sum = &cb->sums;

        if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
                return 0;

        for (i = 0; i < cb->nr_pages; i++) {
                page = cb->compressed_pages[i];
                csum = ~(u32)0;

                kaddr = kmap_atomic(page, KM_USER0);
                csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
                btrfs_csum_final(csum, (char *)&csum);
                kunmap_atomic(kaddr, KM_USER0);

                if (csum != *cb_sum) {
                        printk(KERN_INFO "btrfs csum failed ino %lu "
                               "extent %llu csum %u "
                               "wanted %u mirror %d\n", inode->i_ino,
                               (unsigned long long)disk_start,
                               csum, *cb_sum, cb->mirror_num);
                        ret = -EIO;
                        goto fail;
                }
                cb_sum++;
        }
        ret = 0;
fail:
        return ret;
}
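/*
 * Both IO completion handlers below treat cb->pending_bios as a
 * refcount: each bio submitted against the extent takes a reference,
 * and only the last bio to complete tears the compressed_bio down.
 * Earlier completions simply decrement and return.
 */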
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
        struct extent_io_tree *tree;
        struct compressed_bio *cb = bio->bi_private;
        struct inode *inode;
        struct page *page;
        unsigned long index;
        int ret;

        if (err)
                cb->errors = 1;

        /* if there are more bios still pending for this compressed
         * extent, just exit
         */
        if (!atomic_dec_and_test(&cb->pending_bios))
                goto out;

        inode = cb->inode;
        ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
        if (ret)
                goto csum_failed;

        /* ok, we're the last bio for this extent, lets start
         * the decompression.
         */
        tree = &BTRFS_I(inode)->io_tree;
        ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
                                           cb->start,
                                           cb->orig_bio->bi_io_vec,
                                           cb->orig_bio->bi_vcnt,
                                           cb->compressed_len);
csum_failed:
        if (ret)
                cb->errors = 1;

        /* release the compressed pages */
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
                page_cache_release(page);
        }

        /* do io completion on the original bio */
        if (cb->errors) {
                bio_io_error(cb->orig_bio);
        } else {
                int bio_index = 0;
                struct bio_vec *bvec = cb->orig_bio->bi_io_vec;

                /*
                 * we have verified the checksum already, set page
                 * checked so the end_io handlers know about it
                 */
                while (bio_index < cb->orig_bio->bi_vcnt) {
                        SetPageChecked(bvec->bv_page);
                        bvec++;
                        bio_index++;
                }
                bio_endio(cb->orig_bio, 0);
        }

        /* finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
out:
        bio_put(bio);
}
/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline int end_compressed_writeback(struct inode *inode, u64 start,
                                             unsigned long ram_size)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;
        unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
        struct page *pages[16];
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int ret;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                            min_t(unsigned long,
                                            nr_pages, ARRAY_SIZE(pages)),
                                            pages);
                if (ret == 0) {
                        nr_pages -= 1;
                        index += 1;
                        continue;
                }
                for (i = 0; i < ret; i++) {
                        end_page_writeback(pages[i]);
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
        }
        /* the inode may be gone now */
        return 0;
}
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio, int err)
{
        struct extent_io_tree *tree;
        struct compressed_bio *cb = bio->bi_private;
        struct inode *inode;
        struct page *page;
        unsigned long index;

        if (err)
                cb->errors = 1;

        /* if there are more bios still pending for this compressed
         * extent, just exit
         */
        if (!atomic_dec_and_test(&cb->pending_bios))
                goto out;

        /* ok, we're the last bio for this extent, step one is to
         * call back into the FS and do all the end_io operations
         */
        inode = cb->inode;
        tree = &BTRFS_I(inode)->io_tree;
        cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
        tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
                                         cb->start,
                                         cb->start + cb->len - 1,
                                         NULL, 1);
        cb->compressed_pages[0]->mapping = NULL;

        end_compressed_writeback(inode, cb->start, cb->len);
        /* note, our inode could be gone now */

        /*
         * release the compressed pages, these came from alloc_page and
         * are not attached to the inode at all
         */
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
                page_cache_release(page);
        }

        /* finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
out:
        bio_put(bio);
}
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  unsigned long len, u64 disk_start,
                                  unsigned long compressed_len,
                                  struct page **compressed_pages,
                                  unsigned long nr_pages)
{
        struct bio *bio = NULL;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct compressed_bio *cb;
        unsigned long bytes_left;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        int page_index = 0;
        struct page *page;
        u64 first_byte = disk_start;
        struct block_device *bdev;
        int ret;

        WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
        cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
        atomic_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        cb->start = start;
        cb->len = len;
        cb->mirror_num = 0;
        cb->compressed_pages = compressed_pages;
        cb->compressed_len = compressed_len;
        cb->orig_bio = NULL;
        cb->nr_pages = nr_pages;

        bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;
        atomic_inc(&cb->pending_bios);

        /* create and submit bios for the compressed pages */
        bytes_left = compressed_len;
        for (page_index = 0; page_index < cb->nr_pages; page_index++) {
                page = compressed_pages[page_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_size)
                        ret = io_tree->ops->merge_bio_hook(page, 0,
                                                           PAGE_CACHE_SIZE,
                                                           bio, 0);
                else
                        ret = 0;

                page->mapping = NULL;
                if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
                    PAGE_CACHE_SIZE) {
                        bio_get(bio);

                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
                         * we inc the count.  Otherwise, the cb might get
                         * freed before we're done setting it up
                         */
                        atomic_inc(&cb->pending_bios);
                        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
                        BUG_ON(ret);

                        ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
                        BUG_ON(ret);

                        ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
                        BUG_ON(ret);

                        bio_put(bio);

                        bio = compressed_bio_alloc(bdev, first_byte,
                                                   GFP_NOFS);
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
                }
                if (bytes_left < PAGE_CACHE_SIZE) {
                        printk(KERN_WARNING
                               "btrfs: bytes left %lu compress len %lu nr %lu\n",
                               bytes_left, cb->compressed_len, cb->nr_pages);
                }
                bytes_left -= PAGE_CACHE_SIZE;
                first_byte += PAGE_CACHE_SIZE;
                cond_resched();
        }
        bio_get(bio);

        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
        BUG_ON(ret);

        ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
        BUG_ON(ret);

        ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
        BUG_ON(ret);

        bio_put(bio);
        return 0;
}
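/*
 * Readahead for compressed reads: pull pages of the inode that fall
 * inside this compressed extent but were not part of the original bio
 * into the page cache, so one read and decompress pass can populate as
 * many file pages as possible.
 */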
static noinline int add_ra_bio_pages(struct inode *inode,
                                     u64 compressed_end,
                                     struct compressed_bio *cb)
{
        unsigned long end_index;
        unsigned long page_index;
        u64 last_offset;
        u64 isize = i_size_read(inode);
        int ret;
        struct page *page;
        unsigned long nr_pages = 0;
        struct extent_map *em;
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        struct extent_map_tree *em_tree;
        struct extent_io_tree *tree;
        u64 end;
        int misses = 0;

        page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
        last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
        em_tree = &BTRFS_I(inode)->extent_tree;
        tree = &BTRFS_I(inode)->io_tree;

        if (isize == 0)
                return 0;

        end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);
        while (last_offset < compressed_end) {
                page_index = last_offset >> PAGE_CACHE_SHIFT;

                if (page_index > end_index)
                        break;

                rcu_read_lock();
                page = radix_tree_lookup(&mapping->page_tree, page_index);
                rcu_read_unlock();
                if (page) {
                        misses++;
                        if (misses > 4)
                                break;
                        goto next;
                }

                page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);
                if (!page)
                        break;

                page->index = page_index;
                /*
                 * what we want to do here is call add_to_page_cache_lru,
                 * but that isn't exported, so we reproduce it here
                 */
                if (add_to_page_cache(page, mapping,
                                      page->index, GFP_NOFS)) {
                        page_cache_release(page);
                        goto next;
                }

                /* open coding of lru_cache_add, also not exported */
                page_cache_get(page);
                if (!pagevec_add(&pvec, page))
                        __pagevec_lru_add_file(&pvec);

                end = last_offset + PAGE_CACHE_SIZE - 1;
                /*
                 * at this point, we have a locked page in the page cache
                 * for these bytes in the file.  But, we have to make
                 * sure they map to this compressed extent on disk.
                 */
                set_page_extent_mapped(page);
                lock_extent(tree, last_offset, end, GFP_NOFS);
                spin_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, last_offset,
                                           PAGE_CACHE_SIZE);
                spin_unlock(&em_tree->lock);

                if (!em || last_offset < em->start ||
                    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
                    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, last_offset, end, GFP_NOFS);
                        unlock_page(page);
                        page_cache_release(page);
                        break;
                }
                free_extent_map(em);

                if (page->index == end_index) {
                        char *userpage;
                        size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

                        if (zero_offset) {
                                int zeros;
                                zeros = PAGE_CACHE_SIZE - zero_offset;
                                userpage = kmap_atomic(page, KM_USER0);
                                memset(userpage + zero_offset, 0, zeros);
                                flush_dcache_page(page);
                                kunmap_atomic(userpage, KM_USER0);
                        }
                }

                ret = bio_add_page(cb->orig_bio, page,
                                   PAGE_CACHE_SIZE, 0);

                if (ret == PAGE_CACHE_SIZE) {
                        nr_pages++;
                        page_cache_release(page);
                } else {
                        unlock_extent(tree, last_offset, end, GFP_NOFS);
                        unlock_page(page);
                        page_cache_release(page);
                        break;
                }
next:
                last_offset += PAGE_CACHE_SIZE;
        }
        if (pagevec_count(&pvec))
                __pagevec_lru_add_file(&pvec);
        return 0;
}
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
{
        struct extent_io_tree *tree;
        struct extent_map_tree *em_tree;
        struct compressed_bio *cb;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
        unsigned long compressed_len;
        unsigned long nr_pages;
        unsigned long page_index;
        struct page *page;
        struct block_device *bdev;
        struct bio *comp_bio;
        u64 cur_disk_byte = (u64)bio->bi_sector << 9;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
        int ret;
        u32 *sums;

        tree = &BTRFS_I(inode)->io_tree;
        em_tree = &BTRFS_I(inode)->extent_tree;

        /* we need the actual starting offset of this extent in the file */
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree,
                                   page_offset(bio->bi_io_vec->bv_page),
                                   PAGE_CACHE_SIZE);
        spin_unlock(&em_tree->lock);

        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
        atomic_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        cb->mirror_num = mirror_num;
        sums = &cb->sums;

        cb->start = em->orig_start;
        em_len = em->len;
        em_start = em->start;

        free_extent_map(em);
        em = NULL;

        cb->len = uncompressed_len;
        cb->compressed_len = compressed_len;
        cb->orig_bio = bio;

        nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
                   PAGE_CACHE_SIZE;
        cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
                                       GFP_NOFS);
        bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        for (page_index = 0; page_index < nr_pages; page_index++) {
                cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
                                                              __GFP_HIGHMEM);
        }
        cb->nr_pages = nr_pages;

        add_ra_bio_pages(inode, em_start + em_len, cb);

        /* include any pages we added in add_ra_bio_pages */
        uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
        cb->len = uncompressed_len;

        comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
        comp_bio->bi_private = cb;
        comp_bio->bi_end_io = end_compressed_bio_read;
        atomic_inc(&cb->pending_bios);

        for (page_index = 0; page_index < nr_pages; page_index++) {
                page = cb->compressed_pages[page_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_CACHE_SHIFT;

                if (comp_bio->bi_size)
                        ret = tree->ops->merge_bio_hook(page, 0,
                                                        PAGE_CACHE_SIZE,
                                                        comp_bio, 0);
                else
                        ret = 0;

                page->mapping = NULL;
                if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
                    PAGE_CACHE_SIZE) {
                        bio_get(comp_bio);

                        ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
                        BUG_ON(ret);

                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
                         * we inc the count.  Otherwise, the cb might get
                         * freed before we're done setting it up
                         */
                        atomic_inc(&cb->pending_bios);

                        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                                btrfs_lookup_bio_sums(root, inode, comp_bio,
                                                      sums);
                        }
                        sums += (comp_bio->bi_size + root->sectorsize - 1) /
                                root->sectorsize;

                        ret = btrfs_map_bio(root, READ, comp_bio,
                                            mirror_num, 0);
                        BUG_ON(ret);

                        bio_put(comp_bio);

                        comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
                                                        GFP_NOFS);
                        comp_bio->bi_private = cb;
                        comp_bio->bi_end_io = end_compressed_bio_read;

                        bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
                }
                cur_disk_byte += PAGE_CACHE_SIZE;
        }
        bio_get(comp_bio);

        ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
        BUG_ON(ret);

        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
                btrfs_lookup_bio_sums(root, inode, comp_bio, sums);

        ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
        BUG_ON(ret);

        bio_put(comp_bio);
        return 0;
}