/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	}

	return NULL;
}
bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
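/*
 * Illustrative sketch (not a caller that exists in this file): validating a
 * user-supplied compression name with the two helpers above.  The local
 * @str is hypothetical.
 *
 *	const char *str = "zstd";
 *
 *	if (btrfs_compress_is_valid_type(str, strlen(str)))
 *		pr_info("compression %s is supported\n", str);
 */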
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
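/*
 * Worked example (assuming a 4KiB sectorsize and 4-byte crc32c checksums):
 * a 128KiB compressed extent spans 32 sectors, so the function above
 * returns sizeof(struct compressed_bio) + 32 * 4 bytes, i.e. room for one
 * checksum per sector appended after the struct itself.
 */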
static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
					*cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
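/*
 * A note on the cb->pending_bios pattern used above and mirrored in the
 * write path below: the submitter initializes the refcount to 1 and takes
 * an extra reference before each additional bio it sends down, so the
 * final refcount_dec_and_test() can only succeed once every submitted bio
 * has completed.  Sketch of the shared lifecycle:
 *
 *	refcount_set(&cb->pending_bios, 1);		// at submit time
 *	refcount_inc(&cb->pending_bios);		// before each extra bio
 *	...
 *	if (!refcount_dec_and_test(&cb->pending_bios))	// in end_io
 *		goto out;				// not the last bio yet
 */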
/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ? 0 : 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}
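/*
 * Illustrative call (sketch only; the expected caller is the async delalloc
 * path in inode.c, and the local names here are hypothetical):
 *
 *	ret = btrfs_submit_compressed_write(inode,
 *					    start,		// file offset, page aligned
 *					    ram_size,		// uncompressed length
 *					    disk_start,		// disk byte of the extent
 *					    compressed_size,
 *					    pages, nr_pages);
 *
 * The pages in @pages must each already carry a reference; the end_io
 * handler above drops them with put_page().
 */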
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
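/*
 * Worked example: if the last bio_vec points at the page caching file
 * offset 1MiB, with bv_offset == 0 and bv_len == PAGE_SIZE, then
 * bio_end_offset() returns 1MiB + PAGE_SIZE, i.e. the first file byte
 * *after* the data covered by the bio.
 */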
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
static struct {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};
void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		struct list_head *workspace;

		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;

	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}
/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}
/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
						      start, pages,
						      out_pages,
						      total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}
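/*
 * Illustrative sketch of the in/out parameter convention documented above
 * (hypothetical locals, not a verbatim caller from this tree):
 *
 *	unsigned long nr_pages = max_pages;	// in: capacity of @pages
 *	unsigned long total_in = 0;
 *	unsigned long total_out = len;		// in: must be the input length
 *
 *	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, inode->i_mapping,
 *				   start, pages, &nr_pages,
 *				   &total_in, &total_out);
 *	// out: nr_pages holds the pages actually allocated, total_out the
 *	// compressed size; the workspace was found and released internally.
 */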
/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}
/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}
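/*
 * Illustrative call (sketch; all locals hypothetical): pulling one page of
 * uncompressed data out of a small compressed buffer, e.g. when reading an
 * inline extent.
 *
 *	ret = btrfs_decompress(BTRFS_COMPRESS_ZLIB, data_in, dest_page,
 *			       0,		// start_byte into the output
 *			       srclen,		// compressed input bytes
 *			       PAGE_SIZE);	// bytes wanted out
 */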
void btrfs_exit_compress(void)
{
	free_workspaces();
}
/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of the working buffer within
 * the whole uncompressed extent.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
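/*
 * Worked example of the offset bookkeeping above, assuming disk_start == 0:
 * with a working buffer covering decompressed bytes [4096, 12288)
 * (buf_start == 4096, total_out == 12288) and the current bvec page sitting
 * at file offset 8192, start_byte == 8192, so buf_offset becomes 4096 and
 * working_bytes shrinks from 8192 to 4096: only the second half of the
 * working buffer is copied into that page.
 */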
/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic
 * to quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	u64 index = start >> PAGE_SHIFT;
	u64 end_index = end >> PAGE_SHIFT;
	struct page *page;
	int ret = 1;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		kmap(page);
		kunmap(page);
		put_page(page);
		index++;
	}

	return ret;
}