// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <linux/module.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

/*
 * Maximum number of output pages an LZ4 match can reach back into:
 * the window size rounded up to pages, plus one for misalignment.
 */
#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
/* safety margin required between input end and output end for in-place decompression */
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
20 struct z_erofs_decompressor
{
22 * if destpages have sparsed pages, fill them with bounce pages.
23 * it also check whether destpages indicate continuous physical memory.
25 int (*prepare_destpages
)(struct z_erofs_decompress_req
*rq
,
26 struct list_head
*pagepool
);
27 int (*decompress
)(struct z_erofs_decompress_req
*rq
, u8
*out
);
31 static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req
*rq
,
32 struct list_head
*pagepool
)
34 const unsigned int nr
=
35 PAGE_ALIGN(rq
->pageofs_out
+ rq
->outputsize
) >> PAGE_SHIFT
;
36 struct page
*availables
[LZ4_MAX_DISTANCE_PAGES
] = { NULL
};
37 unsigned long bounced
[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES
,
38 BITS_PER_LONG
)] = { 0 };
40 unsigned int i
, j
, top
;
43 for (i
= j
= 0; i
< nr
; ++i
, ++j
) {
44 struct page
*const page
= rq
->out
[i
];
47 if (j
>= LZ4_MAX_DISTANCE_PAGES
)
50 /* 'valid' bounced can only be tested after a complete round */
51 if (test_bit(j
, bounced
)) {
52 DBG_BUGON(i
< LZ4_MAX_DISTANCE_PAGES
);
53 DBG_BUGON(top
>= LZ4_MAX_DISTANCE_PAGES
);
54 availables
[top
++] = rq
->out
[i
- LZ4_MAX_DISTANCE_PAGES
];
58 __clear_bit(j
, bounced
);
60 if (kaddr
+ PAGE_SIZE
== page_address(page
))
65 kaddr
= page_address(page
);
70 __set_bit(j
, bounced
);
73 victim
= availables
[--top
];
76 victim
= erofs_allocpage(pagepool
, GFP_KERNEL
);
79 victim
->mapping
= Z_EROFS_MAPPING_STAGING
;
86 static void *generic_copy_inplace_data(struct z_erofs_decompress_req
*rq
,
87 u8
*src
, unsigned int pageofs_in
)
90 * if in-place decompression is ongoing, those decompressed
91 * pages should be copied in order to avoid being overlapped.
93 struct page
**in
= rq
->in
;
94 u8
*const tmp
= erofs_get_pcpubuf(0);
96 unsigned int inlen
= rq
->inputsize
- pageofs_in
;
97 unsigned int count
= min_t(uint
, inlen
, PAGE_SIZE
- pageofs_in
);
99 while (tmpp
< tmp
+ inlen
) {
101 src
= kmap_atomic(*in
);
102 memcpy(tmpp
, src
+ pageofs_in
, count
);
113 static int z_erofs_lz4_decompress(struct z_erofs_decompress_req
*rq
, u8
*out
)
115 unsigned int inputmargin
, inlen
;
117 bool copied
, support_0padding
;
120 if (rq
->inputsize
> PAGE_SIZE
)
123 src
= kmap_atomic(*rq
->in
);
125 support_0padding
= false;
127 /* decompression inplace is only safe when 0padding is enabled */
128 if (EROFS_SB(rq
->sb
)->feature_incompat
&
129 EROFS_FEATURE_INCOMPAT_LZ4_0PADDING
) {
130 support_0padding
= true;
132 while (!src
[inputmargin
& ~PAGE_MASK
])
133 if (!(++inputmargin
& ~PAGE_MASK
))
136 if (inputmargin
>= rq
->inputsize
) {
143 inlen
= rq
->inputsize
- inputmargin
;
144 if (rq
->inplace_io
) {
145 const uint oend
= (rq
->pageofs_out
+
146 rq
->outputsize
) & ~PAGE_MASK
;
147 const uint nr
= PAGE_ALIGN(rq
->pageofs_out
+
148 rq
->outputsize
) >> PAGE_SHIFT
;
150 if (rq
->partial_decoding
|| !support_0padding
||
151 rq
->out
[nr
- 1] != rq
->in
[0] ||
152 rq
->inputsize
- oend
<
153 LZ4_DECOMPRESS_INPLACE_MARGIN(inlen
)) {
154 src
= generic_copy_inplace_data(rq
, src
, inputmargin
);
160 ret
= LZ4_decompress_safe_partial(src
+ inputmargin
, out
,
161 inlen
, rq
->outputsize
,
164 erofs_err(rq
->sb
, "failed to decompress, in[%u, %u] out[%u]",
165 inlen
, inputmargin
, rq
->outputsize
);
167 print_hex_dump(KERN_DEBUG
, "[ in]: ", DUMP_PREFIX_OFFSET
,
168 16, 1, src
+ inputmargin
, inlen
, true);
169 print_hex_dump(KERN_DEBUG
, "[out]: ", DUMP_PREFIX_OFFSET
,
170 16, 1, out
, rq
->outputsize
, true);
175 erofs_put_pcpubuf(src
);
181 static struct z_erofs_decompressor decompressors
[] = {
182 [Z_EROFS_COMPRESSION_SHIFTED
] = {
185 [Z_EROFS_COMPRESSION_LZ4
] = {
186 .prepare_destpages
= z_erofs_lz4_prepare_destpages
,
187 .decompress
= z_erofs_lz4_decompress
,
192 static void copy_from_pcpubuf(struct page
**out
, const char *dst
,
193 unsigned short pageofs_out
,
194 unsigned int outputsize
)
196 const char *end
= dst
+ outputsize
;
197 const unsigned int righthalf
= PAGE_SIZE
- pageofs_out
;
198 const char *cur
= dst
- pageofs_out
;
201 struct page
*const page
= *out
++;
204 char *buf
= kmap_atomic(page
);
207 memcpy(buf
, cur
, min_t(uint
, PAGE_SIZE
,
210 memcpy(buf
+ pageofs_out
, cur
+ pageofs_out
,
211 min_t(uint
, righthalf
, end
- cur
));
219 static int z_erofs_decompress_generic(struct z_erofs_decompress_req
*rq
,
220 struct list_head
*pagepool
)
222 const unsigned int nrpages_out
=
223 PAGE_ALIGN(rq
->pageofs_out
+ rq
->outputsize
) >> PAGE_SHIFT
;
224 const struct z_erofs_decompressor
*alg
= decompressors
+ rq
->alg
;
225 unsigned int dst_maptype
;
229 if (nrpages_out
== 1 && !rq
->inplace_io
) {
230 DBG_BUGON(!*rq
->out
);
231 dst
= kmap_atomic(*rq
->out
);
237 * For the case of small output size (especially much less
238 * than PAGE_SIZE), memcpy the decompressed data rather than
239 * compressed data is preferred.
241 if (rq
->outputsize
<= PAGE_SIZE
* 7 / 8) {
242 dst
= erofs_get_pcpubuf(0);
246 rq
->inplace_io
= false;
247 ret
= alg
->decompress(rq
, dst
);
249 copy_from_pcpubuf(rq
->out
, dst
, rq
->pageofs_out
,
252 erofs_put_pcpubuf(dst
);
256 ret
= alg
->prepare_destpages(rq
, pagepool
);
260 dst
= page_address(*rq
->out
);
267 dst
= vm_map_ram(rq
->out
, nrpages_out
, -1, PAGE_KERNEL
);
269 /* retry two more times (totally 3 times) */
281 ret
= alg
->decompress(rq
, dst
+ rq
->pageofs_out
);
285 else if (dst_maptype
== 2)
286 vm_unmap_ram(dst
, nrpages_out
);
290 static int z_erofs_shifted_transform(const struct z_erofs_decompress_req
*rq
,
291 struct list_head
*pagepool
)
293 const unsigned int nrpages_out
=
294 PAGE_ALIGN(rq
->pageofs_out
+ rq
->outputsize
) >> PAGE_SHIFT
;
295 const unsigned int righthalf
= PAGE_SIZE
- rq
->pageofs_out
;
296 unsigned char *src
, *dst
;
298 if (nrpages_out
> 2) {
303 if (rq
->out
[0] == *rq
->in
) {
304 DBG_BUGON(nrpages_out
!= 1);
308 src
= kmap_atomic(*rq
->in
);
310 dst
= kmap_atomic(rq
->out
[0]);
311 memcpy(dst
+ rq
->pageofs_out
, src
, righthalf
);
315 if (nrpages_out
== 2) {
316 DBG_BUGON(!rq
->out
[1]);
317 if (rq
->out
[1] == *rq
->in
) {
318 memmove(src
, src
+ righthalf
, rq
->pageofs_out
);
320 dst
= kmap_atomic(rq
->out
[1]);
321 memcpy(dst
, src
+ righthalf
, rq
->pageofs_out
);
329 int z_erofs_decompress(struct z_erofs_decompress_req
*rq
,
330 struct list_head
*pagepool
)
332 if (rq
->alg
== Z_EROFS_COMPRESSION_SHIFTED
)
333 return z_erofs_shifted_transform(rq
, pagepool
);
334 return z_erofs_decompress_generic(rq
, pagepool
);