// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

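/*
 * Cluster geometry helpers.  cluster_size is always a power of two
 * (1 << log_cluster_size), so e.g. with a cluster of 4 pages, page index 13
 * sits at offset 1 (13 & 3) within cluster 3 (13 >> 2).
 */
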
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

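/*
 * Pages carrying compressed data are tagged through their private field: it
 * points at a (de)compress context whose first word is
 * F2FS_COMPRESSED_PAGE_MAGIC, distinguishing them from atomic/dummy written
 * pages that also use page_private().
 */
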
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_put_compressed_page(struct page *page)
{
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	put_page(page);
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
				struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

		/* drop both the lookup reference and the one kept earlier */
		put_page(page);
		put_page(page);
	}
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

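/*
 * cc->rpages holds one slot per page of the cluster, hence the allocation of
 * sizeof(struct page *) << log_cluster_size below; an already-populated
 * context is left untouched.
 */
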
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);

	if (cc->rpages)
		return 0;

	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
					cc->log_cluster_size, GFP_NOFS);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	kfree(cc->rpages);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

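/*
 * Per-algorithm backends.  Each implements a subset of f2fs_compress_ops and
 * is compiled in only when the matching Kconfig option is set.
 */
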
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

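/*
 * Note the output budget the lz4 and zstd backends use below:
 * clen = rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE, i.e. a cluster is only
 * stored compressed if doing so saves at least one full page after the
 * header.
 */
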
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * we do not raise cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst case, because the lz4 compressor respects the output
	 * budget on its own.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

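/*
 * zstd is the only backend with per-request decompress state, so it also
 * provides init/destroy_decompress_ctx.  Both directions use the kernel's
 * streaming API against a workspace preallocated to the bound reported by
 * ZSTD_CStreamWorkspaceBound()/ZSTD_DStreamWorkspaceBound().
 */
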
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;

	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * compressed data remains in the intermediate buffer because there
	 * is no more space in cbuf.cdata
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;

	workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
					workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
};

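/*
 * A NULL slot in f2fs_cops[] means the algorithm recorded in the inode is
 * not built into this kernel; f2fs_is_compress_backend_ready() catches that
 * before any compressed I/O is issued.
 */
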
bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static struct page *f2fs_grab_page(void)
{
	struct page *page;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}

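/*
 * Compress path: the raw cluster pages are vmap()ed read-only as cc->rbuf
 * and freshly grabbed destination pages as cc->cbuf.  The backend writes its
 * payload after a small header (clen plus zeroed reserved words, together
 * COMPRESS_HEADER_SIZE bytes); destination pages left unused after
 * compression are released again before write-out.
 */
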
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, nr_cpages;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					cc->nr_cpages, GFP_NOFS);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_grab_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
		(nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));

	vunmap(cc->cbuf);
	vunmap(cc->rbuf);

	for (i = nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_put_compressed_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	cc->nr_cpages = nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vunmap(cc->cbuf);
out_vunmap_rbuf:
	vunmap(cc->rbuf);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_put_compressed_page(cc->cpages[i]);
	}
	kfree(cc->cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (refcount_dec_not_one(&dic->ref))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_free_dic;
	}

	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto destroy_decompress_ctx;
	}

	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

out_vunmap_cbuf:
	vunmap(dic->cbuf);
out_vunmap_rbuf:
	vunmap(dic->rbuf);
destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_free_dic:
	if (verity)
		refcount_set(&dic->ref, dic->nr_cpages);
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	return __f2fs_cluster_blocks(cc, false);
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc, false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_compressed_file(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

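/*
 * Write-begin helper for overwriting part of a compressed cluster: all pages
 * of the cluster must be locked and uptodate before any one of them may be
 * rewritten, so the whole cluster is read in (and, for a partially mapped
 * cluster, preallocated) here.
 */
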
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc, false);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			unlock_page(page);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto release_pages;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto release_pages;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(cc, mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}

static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = f2fs_encrypted_file(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (!f2fs_trylock_op(sbi))
		return -EAGAIN;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	refcount_set(&cic->ref, cc->nr_cpages);
	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];
		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	kfree(cic->rpages);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kfree(cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	f2fs_unlock_op(sbi);
	return -EAGAIN;
}

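/*
 * Write-back completion for one compressed page.  As on the read side, only
 * the completion that brings cic->ref down to one ends writeback on the raw
 * pages and frees the context.
 */
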
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_put_compressed_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (refcount_dec_not_one(&cic->ref))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	kfree(cic->rpages);
	kfree(cic);
}

static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);
				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}
	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

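/*
 * Entry point for writing back one cluster: try the compressed path first
 * and fall back to writing the raw pages whenever compression cannot win,
 * which the helpers above report as -EAGAIN.
 */
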
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		cops->destroy_compress_ctx(cc);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}

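/*
 * Build the decompress context for a cluster read.  rpages are the caller's
 * pagecache pages, cpages receive the on-disk compressed blocks, and tpages
 * are the decompress targets: the rpage where one exists, otherwise a
 * temporary page covering a hole in the requested range.
 */
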
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!dic->rpages) {
		kfree(dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	refcount_set(&dic->ref, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->nr_cpages, GFP_NOFS);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_grab_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->cluster_size, GFP_NOFS);
	if (!dic->tpages)
		goto out_free;

	for (i = 0; i < dic->cluster_size; i++) {
		if (cc->rpages[i]) {
			dic->tpages[i] = cc->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_grab_page();
		if (!dic->tpages[i])
			goto out_free;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			unlock_page(dic->tpages[i]);
			put_page(dic->tpages[i]);
		}
		kfree(dic->tpages);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_put_compressed_page(dic->cpages[i]);
		}
		kfree(dic->cpages);
	}

	kfree(dic->rpages);
	kfree(dic);
}

void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage))
			goto clear_uptodate;

		if (!verity || fsverity_verify_page(rpage)) {
			SetPageUptodate(rpage);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(rpage);
		ClearPageError(rpage);
unlock:
		unlock_page(rpage);
	}
}