// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "compress.h"
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX        /* history window size */
#define LZ4_DISTANCE_MAX        65535   /* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES  (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif
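
/*
 * For instance, decompressing a full 64 KiB pcluster in place needs an
 * extra margin of (65536 >> 8) + 32 = 288 bytes at the end of the buffer.
 */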

struct z_erofs_lz4_decompress_ctx {
        struct z_erofs_decompress_req *rq;
        /* # of encoded, decoded pages */
        unsigned int inpages, outpages;
        /* decoded block total length (used for in-place decompression) */
        unsigned int oend;
};
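
/*
 * Parse the on-disk LZ4 configuration: validate the advertised pcluster
 * size, derive the sliding-window distance in pages and grow the global
 * buffers accordingly.  Images without compression configs pass NULL
 * @data and fall back to the legacy fields in the super block.
 */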
static int z_erofs_load_lz4_config(struct super_block *sb,
                            struct erofs_super_block *dsb, void *data, int size)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct z_erofs_lz4_cfgs *lz4 = data;
        u16 distance;

        if (lz4) {
                if (size < sizeof(struct z_erofs_lz4_cfgs)) {
                        erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
                        return -EINVAL;
                }
                distance = le16_to_cpu(lz4->max_distance);

                sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
                if (!sbi->lz4.max_pclusterblks) {
                        sbi->lz4.max_pclusterblks = 1;  /* reserved case */
                } else if (sbi->lz4.max_pclusterblks >
                           erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
                        erofs_err(sb, "too large lz4 pclusterblks %u",
                                  sbi->lz4.max_pclusterblks);
                        return -EINVAL;
                }
        } else {
                distance = le16_to_cpu(dsb->u1.lz4_max_distance);
                sbi->lz4.max_pclusterblks = 1;
        }

        sbi->lz4.max_distance_pages = distance ?
                                        DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
                                        LZ4_MAX_DISTANCE_PAGES;
        return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate CR.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
                                        struct page **pagepool)
{
        struct z_erofs_decompress_req *rq = ctx->rq;
        struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
        unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
                                           BITS_PER_LONG)] = { 0 };
        unsigned int lz4_max_distance_pages =
                        EROFS_SB(rq->sb)->lz4.max_distance_pages;
        void *kaddr = NULL;
        unsigned int i, j, top;

        top = 0;
        for (i = j = 0; i < ctx->outpages; ++i, ++j) {
                struct page *const page = rq->out[i];
                struct page *victim;

                if (j >= lz4_max_distance_pages)
                        j = 0;

                /* 'valid' bounced can only be tested after a complete round */
                if (!rq->fillgaps && test_bit(j, bounced)) {
                        DBG_BUGON(i < lz4_max_distance_pages);
                        DBG_BUGON(top >= lz4_max_distance_pages);
                        availables[top++] = rq->out[i - lz4_max_distance_pages];
                }

                if (page) {
                        __clear_bit(j, bounced);
                        if (!PageHighMem(page)) {
                                if (!i) {
                                        kaddr = page_address(page);
                                        continue;
                                }
                                if (kaddr &&
                                    kaddr + PAGE_SIZE == page_address(page)) {
                                        kaddr += PAGE_SIZE;
                                        continue;
                                }
                        }
                        kaddr = NULL;
                        continue;
                }
                kaddr = NULL;
                __set_bit(j, bounced);

                if (top) {
                        victim = availables[--top];
                } else {
                        victim = __erofs_allocpage(pagepool, rq->gfp, true);
                        if (!victim)
                                return -ENOMEM;
                        set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
                }
                rq->out[i] = victim;
        }
        return kaddr ? 1 : 0;
}
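
/*
 * Map the compressed input for decompression.  *maptype records how the
 * mapping must be undone by the caller:
 *   0 - single input page, still kmapped;
 *   1 - multiple input pages mapped contiguously with vm_map_ram();
 *   2 - input copied into a global buffer to resolve overlaps;
 *   3 - safe in-place decompression within the output buffer itself.
 */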
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
                        void *inpage, void *out, unsigned int *inputmargin,
                        int *maptype, bool may_inplace)
{
        struct z_erofs_decompress_req *rq = ctx->rq;
        unsigned int omargin, total, i;
        struct page **in;
        void *src, *tmp;

        if (rq->inplace_io) {
                omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
                if (rq->partial_decoding || !may_inplace ||
                    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
                        goto docopy;

                for (i = 0; i < ctx->inpages; ++i)
                        if (rq->out[ctx->outpages - ctx->inpages + i] !=
                            rq->in[i])
                                goto docopy;
                kunmap_local(inpage);
                *maptype = 3;
                return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
        }

        if (ctx->inpages <= 1) {
                *maptype = 0;
                return inpage;
        }
        kunmap_local(inpage);
        src = erofs_vm_map_ram(rq->in, ctx->inpages);
        if (!src)
                return ERR_PTR(-ENOMEM);
        *maptype = 1;
        return src;

docopy:
        /* Or copy compressed data which can be overlapped to per-CPU buffer */
        in = rq->in;
        src = z_erofs_get_gbuf(ctx->inpages);
        if (!src) {
                DBG_BUGON(1);
                kunmap_local(inpage);
                return ERR_PTR(-EFAULT);
        }

        tmp = src;
        total = rq->inputsize;
        while (total) {
                unsigned int page_copycnt =
                        min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

                if (!inpage)
                        inpage = kmap_local_page(*in);
                memcpy(tmp, inpage + *inputmargin, page_copycnt);
                kunmap_local(inpage);
                inpage = NULL;
                tmp += page_copycnt;
                total -= page_copycnt;
                ++in;
                *inputmargin = 0;
        }
        *maptype = 2;
        return src;
}

/*
 * Get the exact inputsize with the zero_padding feature.
 * - For LZ4, it should work if the zero_padding feature is on (5.3+);
 * - For MicroLZMA, it'd be enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
                         unsigned int padbufsize)
{
        const char *padend;

        padend = memchr_inv(padbuf, 0, padbufsize);
        if (!padend)
                return -EFSCORRUPTED;
        rq->inputsize -= padend - padbuf;
        rq->pageofs_in += padend - padbuf;
        return 0;
}
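
/*
 * Decompress one LZ4 pcluster into the (virtually) contiguous buffer @dst.
 * With zero-padding, the exact compressed size is known, so the faster
 * LZ4_decompress_safe() applies; otherwise use partial decoding, which
 * tolerates extra trailing data in legacy pclusters.
 */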
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
                                      u8 *dst)
{
        struct z_erofs_decompress_req *rq = ctx->rq;
        bool support_0padding = false, may_inplace = false;
        unsigned int inputmargin;
        u8 *out, *headpage, *src;
        int ret, maptype;

        DBG_BUGON(*rq->in == NULL);
        headpage = kmap_local_page(*rq->in);

        /* LZ4 decompression inplace is only safe if zero_padding is enabled */
        if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
                support_0padding = true;
                ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
                                min_t(unsigned int, rq->inputsize,
                                      rq->sb->s_blocksize - rq->pageofs_in));
                if (ret) {
                        kunmap_local(headpage);
                        return ret;
                }
                may_inplace = !((rq->pageofs_in + rq->inputsize) &
                                (rq->sb->s_blocksize - 1));
        }

        inputmargin = rq->pageofs_in;
        src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
                                         &maptype, may_inplace);
        if (IS_ERR(src))
                return PTR_ERR(src);

        out = dst + rq->pageofs_out;
        /* legacy format could compress extra data in a pcluster. */
        if (rq->partial_decoding || !support_0padding)
                ret = LZ4_decompress_safe_partial(src + inputmargin, out,
                                rq->inputsize, rq->outputsize, rq->outputsize);
        else
                ret = LZ4_decompress_safe(src + inputmargin, out,
                                          rq->inputsize, rq->outputsize);

        if (ret != rq->outputsize) {
                erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
                          ret, rq->inputsize, inputmargin, rq->outputsize);
                if (ret >= 0)
                        memset(out + ret, 0, rq->outputsize - ret);
                ret = -EFSCORRUPTED;
        } else {
                ret = 0;
        }

        if (maptype == 0) {
                kunmap_local(headpage);
        } else if (maptype == 1) {
                vm_unmap_ram(src, ctx->inpages);
        } else if (maptype == 2) {
                z_erofs_put_gbuf(src);
        } else if (maptype != 3) {
                DBG_BUGON(1);
                return -EFAULT;
        }
        return ret;
}
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
                                  struct page **pagepool)
{
        struct z_erofs_lz4_decompress_ctx ctx;
        unsigned int dst_maptype;
        void *dst;
        int ret;

        ctx.rq = rq;
        ctx.oend = rq->pageofs_out + rq->outputsize;
        ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
        ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

        /* one optimized fast path only for non-bigpcluster cases yet */
        if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
                DBG_BUGON(!*rq->out);
                dst = kmap_local_page(*rq->out);
                dst_maptype = 0;
                goto dstmap_out;
        }

        /* general decoding path which can be used for all cases */
        ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
        if (ret < 0) {
                return ret;
        } else if (ret > 0) {
                dst = page_address(*rq->out);
                dst_maptype = 1;
        } else {
                dst = erofs_vm_map_ram(rq->out, ctx.outpages);
                if (!dst)
                        return -ENOMEM;
                dst_maptype = 2;
        }

dstmap_out:
        ret = z_erofs_lz4_decompress_mem(&ctx, dst);
        if (!dst_maptype)
                kunmap_local(dst);
        else if (dst_maptype == 2)
                vm_unmap_ram(dst, ctx.outpages);
        return ret;
}
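
/*
 * "Decompress" uncompressed (shifted or interlaced) pclusters: data is only
 * moved or copied between the input and output page vectors, using
 * memmove() for pages shared by both sides.
 */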
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
                                   struct page **pagepool)
{
        const unsigned int nrpages_in =
                PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
        const unsigned int bs = rq->sb->s_blocksize;
        unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
        u8 *kin;

        if (rq->outputsize > rq->inputsize)
                return -EOPNOTSUPP;
        if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
                cur = bs - (rq->pageofs_out & (bs - 1));
                pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
                cur = min(cur, rq->outputsize);
                if (cur && rq->out[0]) {
                        kin = kmap_local_page(rq->in[nrpages_in - 1]);
                        if (rq->out[0] == rq->in[nrpages_in - 1]) {
                                memmove(kin + rq->pageofs_out, kin + pi, cur);
                                flush_dcache_page(rq->out[0]);
                        } else {
                                memcpy_to_page(rq->out[0], rq->pageofs_out,
                                               kin + pi, cur);
                        }
                        kunmap_local(kin);
                }
                rq->outputsize -= cur;
        }

        for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
                insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
                rq->outputsize -= insz;
                if (!rq->in[ni])
                        continue;
                kin = kmap_local_page(rq->in[ni]);
                pi = 0;
                do {
                        no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
                        po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
                        DBG_BUGON(no >= nrpages_out);
                        cnt = min(insz - pi, PAGE_SIZE - po);
                        if (rq->out[no] == rq->in[ni]) {
                                memmove(kin + po,
                                        kin + rq->pageofs_in + pi, cnt);
                                flush_dcache_page(rq->out[no]);
                        } else if (rq->out[no]) {
                                memcpy_to_page(rq->out[no], po,
                                               kin + rq->pageofs_in + pi, cnt);
                        }
                        pi += cnt;
                } while (pi < insz);
                kunmap_local(kin);
        }
        DBG_BUGON(ni > nrpages_in);
        return 0;
}
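
/*
 * Helper for the streaming decompressors (LZMA, DEFLATE, Zstd): switch to
 * the next output and/or input page once the current ones are exhausted,
 * and resolve inplace I/O overlaps via the bounce buffer or short-lived
 * pages.
 */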
int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
                               void **src, struct page **pgpl)
{
        struct z_erofs_decompress_req *rq = dctx->rq;
        struct super_block *sb = rq->sb;
        struct page **pgo, *tmppage;
        unsigned int j;

        if (!dctx->avail_out) {
                if (++dctx->no >= dctx->outpages || !rq->outputsize) {
                        erofs_err(sb, "insufficient space for decompressed data");
                        return -EFSCORRUPTED;
                }

                if (dctx->kout)
                        kunmap_local(dctx->kout);
                dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
                rq->outputsize -= dctx->avail_out;
                pgo = &rq->out[dctx->no];
                if (!*pgo && rq->fillgaps) {    /* deduped */
                        *pgo = erofs_allocpage(pgpl, rq->gfp);
                        if (!*pgo) {
                                dctx->kout = NULL;
                                return -ENOMEM;
                        }
                        set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
                }
                if (*pgo) {
                        dctx->kout = kmap_local_page(*pgo);
                        *dst = dctx->kout + rq->pageofs_out;
                } else {
                        *dst = dctx->kout = NULL;
                }
                rq->pageofs_out = 0;
        }

        if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
                if (++dctx->ni >= dctx->inpages) {
                        erofs_err(sb, "invalid compressed data");
                        return -EFSCORRUPTED;
                }
                if (dctx->kout) /* unlike kmap(), take care of the orders */
                        kunmap_local(dctx->kout);
                kunmap_local(dctx->kin);

                dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
                rq->inputsize -= dctx->inbuf_sz;
                dctx->kin = kmap_local_page(rq->in[dctx->ni]);
                *src = dctx->kin;
                dctx->bounced = false;
                if (dctx->kout) {
                        j = (u8 *)*dst - dctx->kout;
                        dctx->kout = kmap_local_page(rq->out[dctx->no]);
                        *dst = dctx->kout + j;
                }
                dctx->inbuf_pos = 0;
        }

        /*
         * Handle overlapping: use the given bounce buffer if the input data
         * is still being processed; otherwise utilize short-lived pages from
         * the on-stack page pool, which are shared within the same request.
         * Note that only a few inplace I/O pages need to be doubled.
         */
        if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
                memcpy(dctx->bounce, *src, dctx->inbuf_sz);
                *src = dctx->bounce;
                dctx->bounced = true;
        }

        for (j = dctx->ni + 1; j < dctx->inpages; ++j) {
                if (rq->out[dctx->no] != rq->in[j])
                        continue;
                tmppage = erofs_allocpage(pgpl, rq->gfp);
                if (!tmppage)
                        return -ENOMEM;
                set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
                copy_highpage(tmppage, rq->in[j]);
                rq->in[j] = tmppage;
        }
        return 0;
}
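
/* Built-in decompressors, indexed by the on-disk algorithm id. */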
const struct z_erofs_decompressor *z_erofs_decomp[] = {
        [Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
                .decompress = z_erofs_transform_plain,
                .name = "shifted"
        },
        [Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
                .decompress = z_erofs_transform_plain,
                .name = "interlaced"
        },
        [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
                .config = z_erofs_load_lz4_config,
                .decompress = z_erofs_lz4_decompress,
                .init = z_erofs_gbuf_init,
                .exit = z_erofs_gbuf_exit,
                .name = "lz4"
        },
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
        [Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
        [Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
        [Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};
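
/*
 * Load the per-algorithm configurations following the super block and hand
 * each one to the matching decompressor's ->config() hook; images without
 * the compr_cfgs feature only carry the legacy LZ4 fields.
 */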
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        unsigned int algs, alg;
        erofs_off_t offset;
        int size, ret = 0;

        if (!erofs_sb_has_compr_cfgs(sbi)) {
                sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
                return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
        }

        sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
        if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
                erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
                          sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
                return -EOPNOTSUPP;
        }

        erofs_init_metabuf(&buf, sb);
        offset = EROFS_SUPER_OFFSET + sbi->sb_size;
        alg = 0;
        for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
                const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
                void *data;

                if (!(algs & 1))
                        continue;

                data = erofs_read_metadata(sb, &buf, &offset, &size);
                if (IS_ERR(data)) {
                        ret = PTR_ERR(data);
                        break;
                }

                if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
                        ret = dec->config(sb, dsb, data, size);
                } else {
                        erofs_err(sb, "algorithm %d isn't enabled on this kernel",
                                  alg);
                        ret = -EOPNOTSUPP;
                }
                kfree(data);
                if (ret)
                        break;
        }
        erofs_put_metabuf(&buf);
        return ret;
}
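
/* Initialize all built-in decompressors, unwinding on partial failure. */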
int __init z_erofs_init_decompressor(void)
{
        int i, err;

        for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
                err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
                if (err) {
                        while (i--)
                                if (z_erofs_decomp[i])
                                        z_erofs_decomp[i]->exit();
                        return err;
                }
        }
        return 0;
}

void z_erofs_exit_decompressor(void)
{
        int i;

        for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
                if (z_erofs_decomp[i])
                        z_erofs_decomp[i]->exit();
}