1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
8 #include <linux/module.h>
#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

/* worst-case number of pages an LZ4 match (<= LZ4_DISTANCE_MAX) can span */
#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
20 struct z_erofs_decompressor
{
22 * if destpages have sparsed pages, fill them with bounce pages.
23 * it also check whether destpages indicate continuous physical memory.
25 int (*prepare_destpages
)(struct z_erofs_decompress_req
*rq
,
26 struct list_head
*pagepool
);
27 int (*decompress
)(struct z_erofs_decompress_req
*rq
, u8
*out
);
31 static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req
*rq
,
32 struct list_head
*pagepool
)
34 const unsigned int nr
=
35 PAGE_ALIGN(rq
->pageofs_out
+ rq
->outputsize
) >> PAGE_SHIFT
;
36 struct page
*availables
[LZ4_MAX_DISTANCE_PAGES
] = { NULL
};
37 unsigned long bounced
[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES
,
38 BITS_PER_LONG
)] = { 0 };
40 unsigned int i
, j
, top
;
43 for (i
= j
= 0; i
< nr
; ++i
, ++j
) {
44 struct page
*const page
= rq
->out
[i
];
47 if (j
>= LZ4_MAX_DISTANCE_PAGES
)
50 /* 'valid' bounced can only be tested after a complete round */
51 if (test_bit(j
, bounced
)) {
52 DBG_BUGON(i
< LZ4_MAX_DISTANCE_PAGES
);
53 DBG_BUGON(top
>= LZ4_MAX_DISTANCE_PAGES
);
54 availables
[top
++] = rq
->out
[i
- LZ4_MAX_DISTANCE_PAGES
];
58 __clear_bit(j
, bounced
);
60 if (kaddr
+ PAGE_SIZE
== page_address(page
))
65 kaddr
= page_address(page
);
70 __set_bit(j
, bounced
);
73 victim
= availables
[--top
];
76 victim
= erofs_allocpage(pagepool
, GFP_KERNEL
);
79 set_page_private(victim
, Z_EROFS_SHORTLIVED_PAGE
);
86 static void *generic_copy_inplace_data(struct z_erofs_decompress_req
*rq
,
87 u8
*src
, unsigned int pageofs_in
)
90 * if in-place decompression is ongoing, those decompressed
91 * pages should be copied in order to avoid being overlapped.
93 struct page
**in
= rq
->in
;
94 u8
*const tmp
= erofs_get_pcpubuf(0);
96 unsigned int inlen
= rq
->inputsize
- pageofs_in
;
97 unsigned int count
= min_t(uint
, inlen
, PAGE_SIZE
- pageofs_in
);
99 while (tmpp
< tmp
+ inlen
) {
101 src
= kmap_atomic(*in
);
102 memcpy(tmpp
, src
+ pageofs_in
, count
);
113 static int z_erofs_lz4_decompress(struct z_erofs_decompress_req
*rq
, u8
*out
)
115 unsigned int inputmargin
, inlen
;
117 bool copied
, support_0padding
;
120 if (rq
->inputsize
> PAGE_SIZE
)
123 src
= kmap_atomic(*rq
->in
);
125 support_0padding
= false;
127 /* decompression inplace is only safe when 0padding is enabled */
128 if (EROFS_SB(rq
->sb
)->feature_incompat
&
129 EROFS_FEATURE_INCOMPAT_LZ4_0PADDING
) {
130 support_0padding
= true;
132 while (!src
[inputmargin
& ~PAGE_MASK
])
133 if (!(++inputmargin
& ~PAGE_MASK
))
136 if (inputmargin
>= rq
->inputsize
) {
143 inlen
= rq
->inputsize
- inputmargin
;
144 if (rq
->inplace_io
) {
145 const uint oend
= (rq
->pageofs_out
+
146 rq
->outputsize
) & ~PAGE_MASK
;
147 const uint nr
= PAGE_ALIGN(rq
->pageofs_out
+
148 rq
->outputsize
) >> PAGE_SHIFT
;
150 if (rq
->partial_decoding
|| !support_0padding
||
151 rq
->out
[nr
- 1] != rq
->in
[0] ||
152 rq
->inputsize
- oend
<
153 LZ4_DECOMPRESS_INPLACE_MARGIN(inlen
)) {
154 src
= generic_copy_inplace_data(rq
, src
, inputmargin
);
160 /* legacy format could compress extra data in a pcluster. */
161 if (rq
->partial_decoding
|| !support_0padding
)
162 ret
= LZ4_decompress_safe_partial(src
+ inputmargin
, out
,
163 inlen
, rq
->outputsize
,
166 ret
= LZ4_decompress_safe(src
+ inputmargin
, out
,
167 inlen
, rq
->outputsize
);
169 if (ret
!= rq
->outputsize
) {
170 erofs_err(rq
->sb
, "failed to decompress %d in[%u, %u] out[%u]",
171 ret
, inlen
, inputmargin
, rq
->outputsize
);
174 print_hex_dump(KERN_DEBUG
, "[ in]: ", DUMP_PREFIX_OFFSET
,
175 16, 1, src
+ inputmargin
, inlen
, true);
176 print_hex_dump(KERN_DEBUG
, "[out]: ", DUMP_PREFIX_OFFSET
,
177 16, 1, out
, rq
->outputsize
, true);
180 memset(out
+ ret
, 0, rq
->outputsize
- ret
);
185 erofs_put_pcpubuf(src
);
191 static struct z_erofs_decompressor decompressors
[] = {
192 [Z_EROFS_COMPRESSION_SHIFTED
] = {
195 [Z_EROFS_COMPRESSION_LZ4
] = {
196 .prepare_destpages
= z_erofs_lz4_prepare_destpages
,
197 .decompress
= z_erofs_lz4_decompress
,
202 static void copy_from_pcpubuf(struct page
**out
, const char *dst
,
203 unsigned short pageofs_out
,
204 unsigned int outputsize
)
206 const char *end
= dst
+ outputsize
;
207 const unsigned int righthalf
= PAGE_SIZE
- pageofs_out
;
208 const char *cur
= dst
- pageofs_out
;
211 struct page
*const page
= *out
++;
214 char *buf
= kmap_atomic(page
);
217 memcpy(buf
, cur
, min_t(uint
, PAGE_SIZE
,
220 memcpy(buf
+ pageofs_out
, cur
+ pageofs_out
,
221 min_t(uint
, righthalf
, end
- cur
));
229 static int z_erofs_decompress_generic(struct z_erofs_decompress_req
*rq
,
230 struct list_head
*pagepool
)
232 const unsigned int nrpages_out
=
233 PAGE_ALIGN(rq
->pageofs_out
+ rq
->outputsize
) >> PAGE_SHIFT
;
234 const struct z_erofs_decompressor
*alg
= decompressors
+ rq
->alg
;
235 unsigned int dst_maptype
;
239 if (nrpages_out
== 1 && !rq
->inplace_io
) {
240 DBG_BUGON(!*rq
->out
);
241 dst
= kmap_atomic(*rq
->out
);
247 * For the case of small output size (especially much less
248 * than PAGE_SIZE), memcpy the decompressed data rather than
249 * compressed data is preferred.
251 if (rq
->outputsize
<= PAGE_SIZE
* 7 / 8) {
252 dst
= erofs_get_pcpubuf(0);
256 rq
->inplace_io
= false;
257 ret
= alg
->decompress(rq
, dst
);
259 copy_from_pcpubuf(rq
->out
, dst
, rq
->pageofs_out
,
262 erofs_put_pcpubuf(dst
);
266 ret
= alg
->prepare_destpages(rq
, pagepool
);
270 dst
= page_address(*rq
->out
);
277 dst
= vm_map_ram(rq
->out
, nrpages_out
, -1);
279 /* retry two more times (totally 3 times) */
291 ret
= alg
->decompress(rq
, dst
+ rq
->pageofs_out
);
295 else if (dst_maptype
== 2)
296 vm_unmap_ram(dst
, nrpages_out
);
300 static int z_erofs_shifted_transform(const struct z_erofs_decompress_req
*rq
,
301 struct list_head
*pagepool
)
303 const unsigned int nrpages_out
=
304 PAGE_ALIGN(rq
->pageofs_out
+ rq
->outputsize
) >> PAGE_SHIFT
;
305 const unsigned int righthalf
= PAGE_SIZE
- rq
->pageofs_out
;
306 unsigned char *src
, *dst
;
308 if (nrpages_out
> 2) {
313 if (rq
->out
[0] == *rq
->in
) {
314 DBG_BUGON(nrpages_out
!= 1);
318 src
= kmap_atomic(*rq
->in
);
320 dst
= kmap_atomic(rq
->out
[0]);
321 memcpy(dst
+ rq
->pageofs_out
, src
, righthalf
);
325 if (nrpages_out
== 2) {
326 DBG_BUGON(!rq
->out
[1]);
327 if (rq
->out
[1] == *rq
->in
) {
328 memmove(src
, src
+ righthalf
, rq
->pageofs_out
);
330 dst
= kmap_atomic(rq
->out
[1]);
331 memcpy(dst
, src
+ righthalf
, rq
->pageofs_out
);
339 int z_erofs_decompress(struct z_erofs_decompress_req
*rq
,
340 struct list_head
*pagepool
)
342 if (rq
->alg
== Z_EROFS_COMPRESSION_SHIFTED
)
343 return z_erofs_shifted_transform(rq
, pagepool
);
344 return z_erofs_decompress_generic(rq
, pagepool
);