// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * Based on jffs2 zlib code:
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/refcount.h>
#include "compression.h"
struct workspace {
	z_stream strm;
	char *buf;
	struct list_head list;
	int level;
};
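/*
 * How the fields are used below: strm carries the zlib stream state and
 * owns the scratch memory zlib works in (strm.workspace); buf is a
 * one-page bounce buffer that inflate fills before the data is copied to
 * its destination; list links the workspace into the caller's workspace
 * pool (see the btrfs_compress_op table at the end of this file); level
 * is the deflate level chosen by zlib_set_level().
 */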
static void zlib_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->strm.workspace);
	kfree(workspace->buf);
	kfree(workspace);
}
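/*
 * Allocate everything a compress or decompress call will need up front.
 * The deflate scratch area reported by zlib_deflate_workspacesize() for
 * MAX_WBITS/MAX_MEM_LEVEL is large (roughly a few hundred KiB; that
 * figure is an estimate), so kvmalloc() is used to allow a vmalloc
 * fallback when physically contiguous memory is scarce.
 */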
static struct list_head *zlib_alloc_workspace(void)
{
	struct workspace *workspace;
	int workspacesize;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			    zlib_inflate_workspacesize());
	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!workspace->strm.workspace || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	zlib_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}
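/*
 * Compress pages from @mapping starting at @start into at most *out_pages
 * destination pages. Rough shape of the loop below: feed the input one
 * page at a time with Z_SYNC_FLUSH, grab a fresh output page whenever
 * avail_out reaches zero, bail out early if the output is not actually
 * smaller than the input, and finally close the stream with Z_FINISH.
 */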
static int zlib_compress_pages(struct list_head *ws,
			       struct address_space *mapping,
			       u64 start,
			       struct page **pages,
			       unsigned long *out_pages,
			       unsigned long *total_in,
			       unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret;
	char *data_in;
	char *cpage_out;
	int nr_pages = 0;
	struct page *in_page = NULL;
	struct page *out_page = NULL;
	unsigned long bytes_left;
	unsigned long len = *total_out;
	unsigned long nr_dest_pages = *out_pages;
	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
		pr_warn("BTRFS: deflateInit failed\n");
		ret = -EIO;
		goto out;
	}

	workspace->strm.total_in = 0;
	workspace->strm.total_out = 0;

	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	data_in = kmap(in_page);

	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	cpage_out = kmap(out_page);
	pages[0] = out_page;
	nr_pages = 1;

	workspace->strm.next_in = data_in;
	workspace->strm.next_out = cpage_out;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.avail_in = min(len, PAGE_SIZE);

	while (workspace->strm.total_in < len) {
		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
		if (ret != Z_OK) {
			pr_debug("BTRFS: deflate in loop returned %d\n", ret);
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		}

		/* we're making it bigger, give up */
		if (workspace->strm.total_in > 8192 &&
		    workspace->strm.total_in <
		    workspace->strm.total_out) {
			ret = -E2BIG;
			goto out;
		}
		/* we need another page for writing out.  Test this
		 * before the total_in so we will pull in a new page for
		 * the stream end if required
		 */
		if (workspace->strm.avail_out == 0) {
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
		/* we're all done */
		if (workspace->strm.total_in >= len)
			break;

		/* we've read in a full page, get a new one */
		if (workspace->strm.avail_in == 0) {
			if (workspace->strm.total_out > max_out)
				break;

			bytes_left = len - workspace->strm.total_in;
			kunmap(in_page);
			put_page(in_page);

			start += PAGE_SIZE;
			in_page = find_get_page(mapping,
						start >> PAGE_SHIFT);
			data_in = kmap(in_page);
			workspace->strm.avail_in = min(bytes_left,
						       PAGE_SIZE);
			workspace->strm.next_in = data_in;
		}
	}
	workspace->strm.avail_in = 0;
	ret = zlib_deflate(&workspace->strm, Z_FINISH);
	zlib_deflateEnd(&workspace->strm);

	if (ret != Z_STREAM_END) {
		ret = -EIO;
		goto out;
	}

	if (workspace->strm.total_out >= workspace->strm.total_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_out = workspace->strm.total_out;
	*total_in = workspace->strm.total_in;
out:
	*out_pages = nr_pages;
	if (out_page)
		kunmap(out_page);

	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	return ret;
}
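/*
 * Inflate the compressed pages attached to @cb and copy the result into
 * the pages of the original bio. All output goes through the one-page
 * workspace->buf bounce buffer; btrfs_decompress_buf2page() scatters each
 * filled buffer into the destination pages and returns 0 once the bio is
 * fully satisfied, which is what lets the loop below stop early.
 */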
static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0, ret2;
	int wbits = MAX_WBITS;
	char *data_in;
	size_t total_out = 0;
	unsigned long page_in_index = 0;
	size_t srclen = cb->compressed_len;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	struct page **pages_in = cb->compressed_pages;
	u64 disk_start = cb->start;
	struct bio *orig_bio = cb->orig_bio;

	data_in = kmap(pages_in[page_in_index]);
	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
	workspace->strm.total_in = 0;

	workspace->strm.total_out = 0;
	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;

	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		kunmap(pages_in[page_in_index]);
		return -EIO;
	}
	while (workspace->strm.total_in < srclen) {
		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		/* we didn't make progress in this inflate call, we're done */
		if (buf_start == total_out)
			break;

		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
						 total_out, disk_start,
						 orig_bio);
		if (ret2 == 0) {
			ret = 0;
			goto done;
		}

		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;

		if (workspace->strm.avail_in == 0) {
			unsigned long tmp;

			kunmap(pages_in[page_in_index]);
			page_in_index++;
			if (page_in_index >= total_pages_in) {
				data_in = NULL;
				break;
			}
			data_in = kmap(pages_in[page_in_index]);
			workspace->strm.next_in = data_in;
			tmp = srclen - workspace->strm.total_in;
			workspace->strm.avail_in = min(tmp,
						       PAGE_SIZE);
		}
	}
	if (ret != Z_STREAM_END)
		ret = -EIO;
	else
		ret = 0;
done:
	zlib_inflateEnd(&workspace->strm);
	if (data_in)
		kunmap(pages_in[page_in_index]);
	if (!ret)
		zero_fill_bio(orig_bio);
	return ret;
}
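/*
 * Both decompress paths peek at the first two bytes of the stream: a
 * standard zlib wrapper starts with a CMF byte (low nibble Z_DEFLATED,
 * high nibble log2(window size) - 8) and a FLG byte, with the PRESET_DICT
 * bit and the (CMF << 8 | FLG) % 31 == 0 header check identifying it.
 * When that header is present and no preset dictionary is used,
 * inflateInit2() is called with negative window bits so zlib performs a
 * raw inflate and skips the adler32 verification. zlib_decompress() below
 * is the single-page variant, used for small reads such as inline
 * extents, copying only the bytes needed to fill @dest_page starting at
 * @start_byte.
 */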
static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
			   struct page *dest_page,
			   unsigned long start_byte,
			   size_t srclen, size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0;
	int wbits = MAX_WBITS;
	unsigned long bytes_left;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes_left = destlen;

	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = srclen;
	workspace->strm.total_in = 0;

	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.total_out = 0;
	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		return -EIO;
	}

	while (bytes_left > 0) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		if (total_out == buf_start) {
			ret = -EIO;
			break;
		}

		if (total_out <= start_byte)
			goto next;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min(PAGE_SIZE - pg_offset,
			    PAGE_SIZE - buf_offset);
		bytes = min(bytes, bytes_left);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
		bytes_left -= bytes;
next:
		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;
	}

	if (ret != Z_STREAM_END && bytes_left != 0)
		ret = -EIO;
	else
		ret = 0;

	zlib_inflateEnd(&workspace->strm);

	/*
	 * this should only happen if zlib returned fewer bytes than we
	 * expected.  btrfs_get_block is responsible for zeroing from the
	 * end of the inline extent (destlen) to the end of the page
	 */
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}
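/*
 * The requested level arrives packed in the high nibble of @type, which
 * is what the mask below decodes (presumably carrying the N from a
 * compress=zlib:N mount option). A value of 0 means "not specified" and
 * falls back to the default level 3; values above 9 are clamped to
 * zlib's maximum.
 */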
static void zlib_set_level(struct list_head *ws, unsigned int type)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	unsigned level = (type & 0xF0) >> 4;

	if (level > 9)
		level = 9;

	workspace->level = level > 0 ? level : 3;
}
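/*
 * Hook table through which the generic btrfs compression code drives this
 * file: workspace management, extent compression, bio decompression,
 * single-page decompression and level selection all dispatch through it.
 */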
const struct btrfs_compress_op btrfs_zlib_compress = {
	.alloc_workspace	= zlib_alloc_workspace,
	.free_workspace		= zlib_free_workspace,
	.compress_pages		= zlib_compress_pages,
	.decompress_bio		= zlib_decompress_bio,
	.decompress		= zlib_decompress,
	.set_level		= zlib_set_level,
};