fs/erofs/decompressor.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif
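
/*
 * Added note: LZ4 matches can reference data up to LZ4_DISTANCE_MAX bytes
 * back, so at most LZ4_MAX_DISTANCE_PAGES already-decompressed pages (the
 * window rounded up to pages, plus one for misalignment) may still be
 * needed as copy sources; e.g. with 4 KiB pages this is
 * DIV_ROUND_UP(65535, 4096) + 1 = 17.  LZ4_DECOMPRESS_INPLACE_MARGIN() is
 * the minimum gap that must remain between the end of the compressed input
 * and the end of the output buffer for in-place decompression to be safe.
 */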

struct z_erofs_decompressor {
	/*
	 * if destpages have sparse (absent) pages, fill them with bounce pages.
	 * it also checks whether destpages indicate continuous physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};
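
/*
 * Fill absent entries of rq->out[] with bounce pages so the whole output
 * window is backed by memory.  A bounce page may be reused once it has
 * fallen out of the LZ4 window (tracked by the 'bounced' bitmap over a
 * rolling window of LZ4_MAX_DISTANCE_PAGES entries); otherwise a
 * short-lived page is taken from the pagepool.  Returns 1 if the output
 * pages turn out to be physically contiguous (so page_address() of the
 * first page can be used directly), 0 otherwise, or -ENOMEM on failure.
 */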
static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					  struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= LZ4_MAX_DISTANCE_PAGES)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
			DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
			availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool, GFP_KERNEL);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}
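
/*
 * Copy the remaining compressed data into a per-CPU buffer so that
 * in-place decompression cannot overwrite it while the output pages are
 * being filled.  The caller releases the buffer with erofs_put_pcpubuf()
 * once decompression is done.
 */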
static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
				       u8 *src, unsigned int pageofs_in)
{
	/*
	 * if in-place decompression is ongoing, those decompressed
	 * pages should be copied in order to avoid being overlapped.
	 */
	struct page **in = rq->in;
	u8 *const tmp = erofs_get_pcpubuf(0);
	u8 *tmpp = tmp;
	unsigned int inlen = rq->inputsize - pageofs_in;
	unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);

	while (tmpp < tmp + inlen) {
		if (!src)
			src = kmap_atomic(*in);
		memcpy(tmpp, src + pageofs_in, count);
		kunmap_atomic(src);
		src = NULL;
		tmpp += count;
		pageofs_in = 0;
		count = PAGE_SIZE;
		++in;
	}
	return tmp;
}
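
/*
 * Decompress a single LZ4 pcluster (compressed input is limited to one
 * page here).  When the LZ4_0PADDING feature is enabled, leading zero
 * bytes are skipped to find the real start of the compressed data; if
 * in-place I/O cannot be done safely (partial decoding, no 0padding, or an
 * insufficient safety margin), the input is first copied away via
 * generic_copy_inplace_data().
 */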
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin, inlen;
	u8 *src;
	bool copied, support_0padding;
	int ret;

	if (rq->inputsize > PAGE_SIZE)
		return -EOPNOTSUPP;

	src = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (EROFS_SB(rq->sb)->feature_incompat &
	    EROFS_FEATURE_INCOMPAT_LZ4_0PADDING) {
		support_0padding = true;

		while (!src[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(src);
			return -EIO;
		}
	}

	copied = false;
	inlen = rq->inputsize - inputmargin;
	if (rq->inplace_io) {
		const uint oend = (rq->pageofs_out +
				   rq->outputsize) & ~PAGE_MASK;
		const uint nr = PAGE_ALIGN(rq->pageofs_out +
					   rq->outputsize) >> PAGE_SHIFT;

		if (rq->partial_decoding || !support_0padding ||
		    rq->out[nr - 1] != rq->in[0] ||
		    rq->inputsize - oend <
		      LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
			src = generic_copy_inplace_data(rq, src, inputmargin);
			inputmargin = 0;
			copied = true;
		}
	}

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
						  inlen, rq->outputsize,
						  rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  inlen, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, inlen, inputmargin, rq->outputsize);

		WARN_ON(1);
		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, inlen, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	if (copied)
		erofs_put_pcpubuf(src);
	else
		kunmap_atomic(src);
	return ret;
}
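
/*
 * Per-algorithm operations.  The "shifted" (uncompressed) entry carries no
 * callbacks because it is handled entirely by z_erofs_shifted_transform()
 * below and never reaches z_erofs_decompress_generic().
 */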
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};
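
/*
 * Scatter decompressed data from the linear per-CPU buffer back into the
 * (possibly sparse) output pages; NULL entries in *out are skipped.  'dst'
 * points at the decompressed data itself, which belongs at offset
 * pageofs_out within the first output page.
 */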
static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}
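
/*
 * Pick a destination mapping strategy before calling the algorithm:
 *   dst_maptype == 0: single output page, kmap_atomic() is enough;
 *   dst_maptype == 1: prepare_destpages() reported physically contiguous
 *                     pages, so page_address() of the first page is used;
 *   dst_maptype == 2: pages are stitched into a virtually contiguous
 *                     buffer with vm_map_ram().
 * Small outputs (at most 7/8 of a page) are instead decompressed into a
 * per-CPU buffer and copied out with copy_from_pcpubuf().
 */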
static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret, i;

	if (nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/*
	 * For small output sizes (especially much smaller than PAGE_SIZE),
	 * it is preferable to memcpy the decompressed data rather than the
	 * compressed data.
	 */
	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
		dst = erofs_get_pcpubuf(0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		rq->inplace_io = false;
		ret = alg->decompress(rq, dst);
		if (!ret)
			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
					  rq->outputsize);

		erofs_put_pcpubuf(dst);
		return ret;
	}

	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	i = 0;
	while (1) {
		dst = vm_map_ram(rq->out, nrpages_out, -1);

		/* retry two more times (3 times in total) */
		if (dst || ++i >= 3)
			break;
		vm_unmap_aliases();
	}

	if (!dst)
		return -ENOMEM;

	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}
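
/*
 * "Shifted" pclusters store plain (uncompressed) data, so transforming
 * them only means moving the single input page into at most two output
 * pages at the right offset (pageofs_out), taking care of the case where
 * an output page is the input page itself.
 */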
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}
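
/*
 * The only non-static entry point of this file: it dispatches "shifted"
 * (uncompressed) pclusters to z_erofs_shifted_transform() and everything
 * else to the generic path.  As a rough, hedged illustration only (not
 * taken from this file; field usage is inferred from the code above), a
 * caller is expected to fill a request roughly like:
 *
 *	struct z_erofs_decompress_req rq = {
 *		.sb = sb,
 *		.in = compressed_pages,
 *		.out = decompressed_pages,
 *		.pageofs_out = offset_in_first_page,
 *		.inputsize = compressed_bytes,
 *		.outputsize = decompressed_bytes,
 *		.alg = Z_EROFS_COMPRESSION_LZ4,
 *		.inplace_io = true,
 *		.partial_decoding = false,
 *	};
 *	err = z_erofs_decompress(&rq, &pagepool);
 */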
int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return z_erofs_shifted_transform(rq, pagepool);
	return z_erofs_decompress_generic(rq, pagepool);
}