/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	}

	return NULL;
}
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
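
/*
 * Worked example (illustrative, assuming a 4KiB sectorsize and the default
 * 4-byte crc32c checksum): for a 128KiB compressed extent the allocation is
 * sizeof(struct compressed_bio) + DIV_ROUND_UP(131072, 4096) * 4, i.e. the
 * struct plus 32 * 4 = 128 bytes of per-sector checksum space.
 */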
static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];

		csum = ~(u32)0;
		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
					*cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
		disk_start += PAGE_SIZE;
	}
	ret = 0;
fail:
	return ret;
}
/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, lets start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra-bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)
/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)
/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
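
/*
 * Worked example (illustrative): with BTRFS_MAX_UNCOMPRESSED = 128KiB,
 * SAMPLING_READ_SIZE = 16 and SAMPLING_INTERVAL = 256, the heuristic copies
 * 16 bytes from every 256-byte step, i.e. 131072 / 256 = 512 locations, so
 * MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes per data range.
 */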
struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};
static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}
static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}
struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};
void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces left.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}
/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}
/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
						      start, pages,
						      out_pages,
						      total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}
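
/*
 * Illustrative example of the @type_level encoding described above (this
 * assumes the caller packs the value as "type | (level << 4)", as the write
 * path elsewhere in btrfs is expected to do): zlib (type 1) at level 3 would
 * be passed as (3 << 4) | 1 = 0x31, so "type_level & 0xF" recovers the
 * algorithm and set_level() interprets the upper bits as the level.
 */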
/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}
/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);

	return ret;
}
void btrfs_exit_compress(void)
{
	free_workspaces();
}
/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of our workspace buffer.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			break;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)
/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
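
/*
 * Worked example (illustrative): ilog2_w(8192) = ilog2(8192^4) = ilog2(2^52)
 * = 52, whereas plain ilog2(8192) = 13. The scaled value behaves like
 * 4 * log2(n) computed with integer ilog2, which keeps fractional
 * information that a plain ilog2() would drop in the entropy sum below.
 */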
static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
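
/*
 * Worked example (illustrative): if the whole 8192-byte sample is a single
 * byte value, the only bucket has count p = 8192, every term is
 * p * (ilog2_w(8192) - ilog2_w(8192)) = 0 and the function returns 0%.
 * A sample spread evenly over all 256 values gives counts of 32, per-bucket
 * terms of 32 * (52 - 20) = 1024, entropy_sum = 256 * 1024 / 8192 = 32, and
 * since entropy_max = 8 * ilog2_w(2) = 32 the result is 100%.
 */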
#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift) {
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
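
/*
 * Worked example (illustrative): for num = 0x2f and shift = 4 the value is
 * shifted down to 0x2 and the digit is reported as 15 - 2 = 13. Returning
 * the complemented 4-bit digit is what makes the radix sort below produce a
 * descending order, which byte_core_set_size() relies on.
 */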
/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time. Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i ++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}
/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)
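
/*
 * Worked example (illustrative): with an 8192-byte sample the 90% threshold
 * is 8192 * 90 / 100 = 7372 bytes. If the 64 most frequent byte values
 * already account for more than 7372 sampled bytes the data is treated as
 * compressible; if more than 200 distinct values are needed to reach that
 * coverage, it is treated as effectively incompressible.
 */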
static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}
/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)
static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}
static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}
static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
					SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}
/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has counter at level ~54. The heuristic would
	 *    be confused. This can happen when data have some internal repeated
	 *    patterns like "abbacbbc...". This can be detected by analyzing
	 *    pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	__free_workspace(0, ws_list, true);
	return ret;
}
unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}