/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_FS_COMPRESS_H
#define __EROFS_FS_COMPRESS_H

#include "internal.h"
enum {
	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
	Z_EROFS_COMPRESSION_RUNTIME_MAX
};
struct z_erofs_decompress_req {
	struct super_block *sb;
	struct page **in, **out;

	unsigned short pageofs_out;
	unsigned int inputsize, outputsize;

	/* indicate which algorithm will be used for decompression */
	unsigned int alg;
	bool inplace_io, partial_decoding;
};

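/*
 * A minimal usage sketch: how a request could be assembled before being
 * handed to z_erofs_decompress(), declared at the end of this header.
 * The helper name and the fixed pageofs_out/inplace_io/partial_decoding
 * values are illustrative assumptions, not taken from the real callers.
 */
static inline void z_erofs_fill_decompress_req_sketch(struct z_erofs_decompress_req *rq,
		struct super_block *sb, struct page **in, struct page **out,
		unsigned int inputsize, unsigned int outputsize,
		unsigned int alg)
{
	*rq = (struct z_erofs_decompress_req) {
		.sb = sb,
		.in = in,			/* compressed source pages */
		.out = out,			/* decompressed destination pages */
		.pageofs_out = 0,		/* plain data starts at byte 0 of out[0] */
		.inputsize = inputsize,		/* compressed bytes available in @in */
		.outputsize = outputsize,	/* plain bytes expected in @out */
		.alg = alg,			/* decompression algorithm index */
		.inplace_io = false,		/* @in and @out do not share pages here */
		.partial_decoding = false,	/* decode the whole extent */
	};
}
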
/* some special page->private (unsigned long, see below) */
#define Z_EROFS_SHORTLIVED_PAGE		(-1UL << 2)
#define Z_EROFS_PREALLOCATED_PAGE	(-2UL << 2)

/*
 * For all pages in a pcluster, page->private should be one of
 * Type                          Last 2bits      page->private
 * short-lived page              00              Z_EROFS_SHORTLIVED_PAGE
 * preallocated page (tryalloc)  00              Z_EROFS_PREALLOCATED_PAGE
 * cached/managed page           00              pointer to z_erofs_pcluster
 * online page (file-backed,     01/10/11        sub-index << 2 | count
 *              some pages can be used for inplace I/O)
 *
 * page->mapping should be one of
 * Type                 page->mapping
 * short-lived page     NULL
 * preallocated page    NULL
 * cached/managed page  non-NULL or NULL (invalidated/truncated page)
 * online page          non-NULL
 *
 * For all managed pages, PG_private should be set with 1 extra refcount,
 * which is used for page reclaim / migration.
 */

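/*
 * An illustrative helper (an assumption for this sketch, not part of the
 * existing erofs code): per the rule above, online (file-backed) pages are
 * the only type whose last 2 bits of page->private are non-zero; the other
 * three types all carry a 00 tag and are told apart by the full value.
 */
static inline bool z_erofs_is_online_page_sketch(struct page *page)
{
	return (page->private & 3UL) != 0;
}
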
/*
 * short-lived pages are pages directly from the buddy system with specific
 * page->private (no need to set PagePrivate since these are non-LRU /
 * non-movable pages and bypass reclaim / migration code).
 */
static inline bool z_erofs_is_shortlived_page(struct page *page)
{
	if (page->private != Z_EROFS_SHORTLIVED_PAGE)
		return false;

	DBG_BUGON(page->mapping);
	return true;
}

static inline bool z_erofs_put_shortlivedpage(struct list_head *pagepool,
					      struct page *page)
{
	if (!z_erofs_is_shortlived_page(page))
		return false;

	/* short-lived pages should not be used by others at the same time */
	if (page_ref_count(page) > 1) {
		put_page(page);
	} else {
		/* follow the pcluster rule above. */
		set_page_private(page, 0);
		list_add(&page->lru, pagepool);
	}
	return true;
}

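/*
 * A usage sketch (the helper name and the caller-supplied GFP flags are
 * assumptions): a temporary bounce page is tagged as short-lived right
 * after allocation so that the helpers above can recognize it later and
 * recycle it into the local pagepool instead of freeing it immediately.
 */
static inline struct page *z_erofs_alloc_shortlived_page_sketch(gfp_t gfp)
{
	struct page *page = alloc_page(gfp);

	if (page)
		set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
	return page;
}
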
int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool);

#endif