// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
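
/*
 * Each compression backend plugs into f2fs through the vtable below:
 * init_compress_ctx() sizes the backend workspace and the worst-case
 * output length, compress_pages()/decompress_pages() operate on the
 * vmapped cluster buffers, and destroy_compress_ctx() releases the
 * workspace again.
 */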
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}
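
/*
 * Cluster index math, by example: with 4KiB pages and
 * log_cluster_size == 2 (a 4-page, 16KiB cluster), page index 5
 * gives offset_in_cluster() == 1 and cluster_idx() == 1, while
 * start_idx_of_cluster() maps cluster 1 back to page index 4.
 */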
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}
static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data, refcount_t *r)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
	if (r)
		refcount_inc(r);
}

static void f2fs_put_compressed_page(struct page *page)
{
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	put_page(page);
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}
static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
				struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

		/* drop the lookup reference plus the one held earlier */
		put_page(page);
		put_page(page);
	}
}
static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);

	if (cc->nr_rpages)
		return 0;

	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
					cc->log_cluster_size, GFP_NOFS);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	kfree(cc->rpages);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
		return -EIO;
	}
	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
};
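
/*
 * Algorithm selection is per inode: i_compress_algorithm indexes the
 * table above, so a cluster written with one backend is always read
 * back with the same one. A NULL slot (backend compiled out) makes
 * f2fs_is_compress_backend_ready() return false for files that were
 * configured to use it.
 */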
bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static struct page *f2fs_grab_page(void)
{
	struct page *page;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, nr_cpages;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	ret = cops->init_compress_ctx(cc);
	if (ret)
		goto out;

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					cc->nr_cpages, GFP_NOFS);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_grab_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);
	cc->cbuf->chksum = cpu_to_le32(0);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
		(nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));

	vunmap(cc->cbuf);
	vunmap(cc->rbuf);

	for (i = nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_put_compressed_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	cops->destroy_compress_ctx(cc);

	cc->nr_cpages = nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vunmap(cc->cbuf);
out_vunmap_rbuf:
	vunmap(cc->rbuf);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_put_compressed_page(cc->cpages[i]);
	}
	kfree(cc->cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (refcount_dec_not_one(&dic->ref))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_free_dic;
	}

	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

out_vunmap_cbuf:
	vunmap(dic->cbuf);
out_vunmap_rbuf:
	vunmap(dic->rbuf);
out_free_dic:
	if (verity)
		refcount_add(dic->nr_cpages - 1, &dic->ref);

	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
}
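
/*
 * Note on the lifetime handling above: dic->ref counts the cluster's
 * in-flight compressed pages, and each bio completion drops one
 * reference. Only the completion that reaches the final reference
 * (refcount_dec_not_one() returning false) performs the actual
 * decompression, so a cluster is decoded exactly once no matter how
 * many bios carried its cpages.
 */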
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}
static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	return __f2fs_cluster_blocks(cc, false);
}
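
/*
 * On-disk layout reminder: a compressed cluster stores the
 * COMPRESS_ADDR sentinel in its first block slot, followed by the
 * block addresses of the compressed pages; unused slots stay
 * NULL_ADDR. That is why __f2fs_cluster_blocks() only starts
 * counting from slot 1 once it has seen the sentinel.
 */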
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc, false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_compressed_file(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	return __cluster_may_compress(cc);
}
static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc, false);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			unlock_page(page);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
						&last_block_in_bio, false);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto release_pages;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto release_pages;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(cc, mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}
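
/*
 * Unlike the regular write_begin path, the helper above prepares the
 * whole cluster: it returns cluster_size instead of a single page,
 * sets *pagep to the page actually being overwritten, and stashes
 * the rpages array in *fsdata so that f2fs_compress_write_end() can
 * dirty and release every page of the cluster in one go.
 */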
int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = f2fs_encrypted_file(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (!f2fs_trylock_op(sbi))
		return -EAGAIN;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (datablock_addr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	refcount_set(&cic->ref, 1);
	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index,
					cic, i ? &cic->ref : NULL);
		fio.compressed_page = cc->cpages[i];
		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = datablock_addr(dn.inode, dn.node_page,
							dn.ofs_in_node);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	down_write(&fi->i_sem);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	up_write(&fi->i_sem);

	f2fs_put_rpages(cc);
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	kfree(cic->rpages);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kfree(cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	f2fs_unlock_op(sbi);
	return -EAGAIN;
}
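
/*
 * Resulting block layout for an N-page cluster that compressed into
 * nr_cpages blocks: slot 0 keeps the COMPRESS_ADDR sentinel, slots
 * 1..nr_cpages receive the newly written compressed blocks, and any
 * remaining slots are invalidated down to NEW_ADDR, which is where
 * the space saving comes from.
 */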
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_put_compressed_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (refcount_dec_not_one(&cic->ref))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	kfree(cic->rpages);
	kfree(cic);
}
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				lock_page(cc->rpages[i]);
				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_fail;
		}

		*submitted += _submitted;
	}
	return 0;

out_fail:
	/* TODO: revoke partially updated block addresses */
	BUG_ON(compr_blocks);
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		cops->destroy_compress_ctx(cc);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}
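
/*
 * Writeback policy in short: compress only a full, compressible
 * cluster; on -EAGAIN (the data did not shrink enough to save a
 * block, or the op lock was contended) fall back to writing the raw
 * pages in place.
 */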
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!dic->rpages) {
		kfree(dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	refcount_set(&dic->ref, 1);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->nr_cpages, GFP_NOFS);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_grab_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1,
					dic, i ? &dic->ref : NULL);
		dic->cpages[i] = page;
	}

	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->cluster_size, GFP_NOFS);
	if (!dic->tpages)
		goto out_free;

	for (i = 0; i < dic->cluster_size; i++) {
		if (cc->rpages[i])
			continue;

		dic->tpages[i] = f2fs_grab_page();
		if (!dic->tpages[i])
			goto out_free;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->tpages[i])
			continue;
		dic->tpages[i] = cc->rpages[i];
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}
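
/*
 * The three page arrays built above cooperate during decompression:
 * cpages receive the compressed blocks from disk, rpages are the
 * page-cache pages the reader asked for, and tpages is the target
 * view handed to the backend (rpages where they exist, temporary
 * pages for the holes), so the vmapped destination always covers
 * the full cluster.
 */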
void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			f2fs_put_page(dic->tpages[i], 1);
		}
		kfree(dic->tpages);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_put_compressed_page(dic->cpages[i]);
		}
		kfree(dic->cpages);
	}

	kfree(dic->rpages);
	kfree(dic);
}
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage)) {
			ClearPageUptodate(rpage);
			ClearPageError(rpage);
		} else {
			if (!verity || fsverity_verify_page(rpage))
				SetPageUptodate(rpage);
			else
				SetPageError(rpage);
		}
		unlock_page(rpage);
	}
}