/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 * Based on jffs2 zlib code:
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/refcount.h>
#include "compression.h"
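
/*
 * Everything a single zlib compress or decompress call needs: "strm" holds
 * the zlib stream state (strm.workspace is the scratch memory zlib itself
 * requires), "buf" is a one-page bounce buffer used while decompressing,
 * and "list" links this workspace into the idle lists managed by the
 * generic compression code.
 */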
struct workspace {
	z_stream strm;
	char *buf;
	struct list_head list;
};

static void zlib_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->strm.workspace);
	kfree(workspace->buf);
	kfree(workspace);
}

static struct list_head *zlib_alloc_workspace(void)
{
	struct workspace *workspace;
	int workspacesize;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	/* one buffer serves both deflate and inflate, so size it for the larger */
	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());
	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!workspace->strm.workspace || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	zlib_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}
static int zlib_compress_pages(struct list_head *ws,
			       struct address_space *mapping,
			       u64 start,
			       struct page **pages,
			       unsigned long *out_pages,
			       unsigned long *total_in,
			       unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret;
	char *data_in;
	char *cpage_out;
	int nr_pages = 0;
	struct page *in_page = NULL;
	struct page *out_page = NULL;
	unsigned long bytes_left;
	unsigned long len = *total_out;
	unsigned long nr_dest_pages = *out_pages;
	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;

	if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) {
		pr_warn("BTRFS: deflateInit failed\n");
		ret = -EIO;
		goto out;
	}

	workspace->strm.total_in = 0;
	workspace->strm.total_out = 0;

	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	data_in = kmap(in_page);

	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	cpage_out = kmap(out_page);
	pages[0] = out_page;
	nr_pages = 1;

	workspace->strm.next_in = data_in;
	workspace->strm.next_out = cpage_out;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.avail_in = min(len, PAGE_SIZE);

	while (workspace->strm.total_in < len) {
		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
		if (ret != Z_OK) {
			pr_debug("BTRFS: deflate in loop returned %d\n",
				 ret);
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		}

		/*
		 * we're making it bigger, give up: past 8K of input the
		 * output is still larger than the input, so the extent is
		 * better off stored uncompressed
		 */
		if (workspace->strm.total_in > 8192 &&
		    workspace->strm.total_in <
		    workspace->strm.total_out) {
			ret = -E2BIG;
			goto out;
		}
		/* we need another page for writing out.  Test this
		 * before the total_in so we will pull in a new page for
		 * the stream end if required
		 */
		if (workspace->strm.avail_out == 0) {
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
		/* we're all done */
		if (workspace->strm.total_in >= len)
			break;

		/* we've read in a full page, get a new one */
		if (workspace->strm.avail_in == 0) {
			if (workspace->strm.total_out > max_out)
				break;

			bytes_left = len - workspace->strm.total_in;
			kunmap(in_page);
			put_page(in_page);

			start += PAGE_SIZE;
			in_page = find_get_page(mapping,
						start >> PAGE_SHIFT);
			data_in = kmap(in_page);
			workspace->strm.avail_in = min(bytes_left,
						       PAGE_SIZE);
			workspace->strm.next_in = data_in;
		}
	}
	workspace->strm.avail_in = 0;
	ret = zlib_deflate(&workspace->strm, Z_FINISH);
	zlib_deflateEnd(&workspace->strm);

	if (ret != Z_STREAM_END) {
		ret = -EIO;
		goto out;
	}

	if (workspace->strm.total_out >= workspace->strm.total_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_out = workspace->strm.total_out;
	*total_in = workspace->strm.total_in;
out:
	*out_pages = nr_pages;
	if (out_page)
		kunmap(out_page);

	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	return ret;
}
static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0, ret2;
	int wbits = MAX_WBITS;
	char *data_in;
	size_t total_out = 0;
	unsigned long page_in_index = 0;
	size_t srclen = cb->compressed_len;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	struct page **pages_in = cb->compressed_pages;
	u64 disk_start = cb->start;
	struct bio *orig_bio = cb->orig_bio;

	data_in = kmap(pages_in[page_in_index]);
	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
	workspace->strm.total_in = 0;

	workspace->strm.total_out = 0;
	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;

	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}
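
	/*
	 * Worked example of the header check above: a typical zlib stream
	 * starts with the two bytes 0x78 0x9c.  The low nibble of byte 0
	 * is 8 (Z_DEFLATED), PRESET_DICT (bit 5) of byte 1 is clear, and
	 * (0x78 << 8) + 0x9c = 30876 is divisible by 31, so this is a
	 * valid zlib header.  The window size is (0x78 >> 4) + 8 = 15
	 * bits, and passing the negated value (wbits = -15) to
	 * zlib_inflateInit2() makes zlib decode the raw deflate data and
	 * skip the adler32 checksum that follows it.
	 */
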
	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		kunmap(pages_in[page_in_index]);
		return -EIO;
	}
	while (workspace->strm.total_in < srclen) {
		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		/* we didn't make progress in this inflate call, we're done */
		if (buf_start == total_out)
			break;

		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
						 total_out, disk_start,
						 orig_bio);
		if (ret2 == 0) {
			ret = 0;
			goto done;
		}

		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;

		if (workspace->strm.avail_in == 0) {
			unsigned long tmp;

			kunmap(pages_in[page_in_index]);
			page_in_index++;
			if (page_in_index >= total_pages_in) {
				data_in = NULL;
				break;
			}
			data_in = kmap(pages_in[page_in_index]);
			workspace->strm.next_in = data_in;
			tmp = srclen - workspace->strm.total_in;
			workspace->strm.avail_in = min(tmp,
						       PAGE_SIZE);
		}
	}
	if (ret != Z_STREAM_END)
		ret = -EIO;
	else
		ret = 0;
done:
	zlib_inflateEnd(&workspace->strm);
	if (data_in)
		kunmap(pages_in[page_in_index]);
	if (!ret)
		zero_fill_bio(orig_bio);
	return ret;
}
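
/*
 * Decompress srclen bytes from an already mapped buffer (data_in) into a
 * single destination page, starting at start_byte of the decompressed data.
 * destlen is clamped to one page; anything the stream does not cover is
 * zeroed at the end so the page never carries stale data.
 */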
static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
			   struct page *dest_page,
			   unsigned long start_byte,
			   size_t srclen, size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0;
	int wbits = MAX_WBITS;
	unsigned long bytes_left;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes_left = destlen;

	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = srclen;
	workspace->strm.total_in = 0;

	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_SIZE;
	workspace->strm.total_out = 0;
	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		return -EIO;
	}

	while (bytes_left > 0) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		if (total_out == buf_start) {
			ret = -EIO;
			break;
		}

		/* everything decompressed so far lies before start_byte */
		if (total_out <= start_byte)
			goto next;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min(PAGE_SIZE - pg_offset,
			    PAGE_SIZE - buf_offset);
		bytes = min(bytes, bytes_left);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
		bytes_left -= bytes;
next:
		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_SIZE;
	}

	if (ret != Z_STREAM_END && bytes_left != 0)
		ret = -EIO;
	else
		ret = 0;

	zlib_inflateEnd(&workspace->strm);

	/*
	 * this should only happen if zlib returned fewer bytes than we
	 * expected.  btrfs_get_block is responsible for zeroing from the
	 * end of the inline extent (destlen) to the end of the page
	 */
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}

const struct btrfs_compress_op btrfs_zlib_compress = {
	.alloc_workspace	= zlib_alloc_workspace,
	.free_workspace		= zlib_free_workspace,
	.compress_pages		= zlib_compress_pages,
	.decompress_bio		= zlib_decompress_bio,
	.decompress		= zlib_decompress,
};