// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;
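
/*
 * rpages/cpages arrays (one page pointer per block of a cluster) come from a
 * per-superblock slab sized for this mount's cluster size; anything larger
 * than the slab object falls back to f2fs_kzalloc()/kvfree().
 */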
static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}
static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kvfree(pages);
}
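
/*
 * Per-algorithm hooks.  compress_pages()/decompress_pages() operate on the
 * vmapped cluster buffers (cc->rbuf/cc->cbuf and dic->cbuf/dic->rbuf); the
 * optional init/destroy hooks manage whatever per-cluster workspace the
 * backend keeps in cc->private/private2 or dic->private/private2.
 */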
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};
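
/*
 * The cluster index math below assumes cluster_size is a power of two.  For
 * example, with a 4-page cluster (log_cluster_size == 2), page index 11 lives
 * in cluster 11 >> 2 == 2, at offset 11 & 3 == 3, and that cluster starts at
 * page index 2 << 2 == 8.
 */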
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	/*
	 * page->private may be set with pid.
	 * pid_max is enough to check if it is traced.
	 */
	if (IS_IO_TRACED_PAGE(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}
static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}
static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}
static void f2fs_put_rpages_mapping(struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

		/* drop both the lookup reference and the one held earlier */
		put_page(page);
		put_page(page);
	}
}
static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}
struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}
void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}
static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}
static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}
static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, expected:%lu\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}
static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
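
/*
 * For lz4 and zstd, cc->clen is used as an output budget rather than a
 * worst-case bound: a cluster is only stored compressed when it shrinks by
 * at least one block after the COMPRESS_HEADER_SIZE header, so the budget is
 * rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE and anything larger falls back to
 * an uncompressed write.
 */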
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) for the
	 * worst case, because the lz4 compressor handles the output budget
	 * properly on its own.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}
static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}
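
/*
 * LZ4_compress_default() returns the compressed size on success and 0 when
 * the result does not fit within the cc->clen budget; the 0 case is reported
 * as -EAGAIN so the caller writes the cluster uncompressed instead.
 */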
static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, expected:%lu\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}
static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;

	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}
static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}
static int zstd_compress_pages(struct compress_ctx *cc)
{
	size_t ret;
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * Compressed data remains in the intermediate buffer because there is
	 * no more space in cbuf->cdata.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}
static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}
static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	size_t ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, expected:%lu\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}
static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx	= zstd_init_compress_ctx,
	.destroy_compress_ctx	= zstd_destroy_compress_ctx,
	.compress_pages		= zstd_compress_pages,
	.init_decompress_ctx	= zstd_init_decompress_ctx,
	.destroy_decompress_ctx	= zstd_destroy_decompress_ctx,
	.decompress_pages	= zstd_decompress_pages,
};
#endif
#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}
static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzorle_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif
#endif
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};
bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}
static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");
int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}
static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}
static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}
#define MAX_VMAP_RETRIES	3
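
/*
 * vm_map_ram() can fail transiently when the lazily-freed vmap space is
 * exhausted, so retry a few times and flush stale aliases in between.
 */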
static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}
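
/*
 * Compress one cluster: allocate cc->nr_cpages destination pages from the
 * mempool, vmap both page arrays, run the per-algorithm compressor, fill in
 * the compress_data header (clen, optional checksum, reserved words), then
 * trim destination pages the compressed payload does not need.  Returns
 * -EAGAIN when the cluster is not worth compressing, in which case the
 * caller writes the raw pages instead.
 */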
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}
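
/*
 * Called from the read end_io path for every compressed page of a cluster.
 * Only the call that drops dic->pending_pages to zero actually decompresses:
 * it maps the target and compressed page arrays, runs the backend and, when
 * COMPRESS_CHKSUM is enabled on the inode, verifies the stored checksum
 * before completing the raw pages.
 */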
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (atomic_dec_return(&dic->pending_pages))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_free_dic;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_free_dic;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_free_dic;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_free_dic:
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
								ret);
	if (!verity)
		f2fs_free_dic(dic);
}
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}
bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}
static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond end of file */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}
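
/*
 * Walk the dnode entries of one cluster.  A positive count is returned only
 * when the first slot holds the COMPRESS_ADDR cluster header: the number of
 * compressed blocks when @compr is true, or the number of valid blocks
 * otherwise.  A normal (uncompressed) cluster yields 0; errors other than
 * -ENOENT are returned as-is.
 */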
static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, false);
}
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc);
}
static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}
static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			return ret;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			return ret;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
	f2fs_put_rpages_mapping(mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}
int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
						pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}
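
/*
 * start_idx below is the first page index of the cluster containing byte
 * offset @from: for example, with 4KiB pages and a 4-page cluster, from ==
 * 24KiB gives (24576 >> (12 + 2)) << 2 == 4.
 */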
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
						PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}
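
/*
 * Write one compressed cluster.  Slot 0 of the cluster gets the
 * COMPRESS_ADDR marker (the cluster header), the next nr_cpages slots
 * receive the compressed pages via out-of-place writes, and any remaining
 * slots are released back to NEW_ADDR; i_compr_blocks is adjusted to match.
 * Returns -EAGAIN on failure so the caller can fall back to raw writes.
 */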
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);

				if (!PageDirty(cc->rpages[i])) {
					unlock_page(cc->rpages[i]);
					continue;
				}

				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}
	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}
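
/*
 * Build the per-cluster decompress context for a read: copy the raw page
 * pointers from the compress_ctx and allocate one mempool page per
 * compressed block, tagging each with the dic so that the read end_io path
 * can find it again via page_private().
 */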
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->pending_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}
void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage))
			goto clear_uptodate;

		if (!verity || fsverity_verify_page(rpage)) {
			SetPageUptodate(rpage);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(rpage);
		ClearPageError(rpage);
unlock:
		unlock_page(rpage);
	}
}
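
/*
 * The per-superblock slab holds one page pointer per block of the largest
 * cluster configured for this mount (sizeof(struct page *) <<
 * compress_log_size), so the common rpages/cpages allocations avoid
 * kvmalloc.
 */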
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}
static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}
int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}