fs/btrfs/zlib.c

/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 * Based on jffs2 zlib code:
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include "compression.h"
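
/*
 * A reusable zlib context: the z_stream plus its vmalloc'ed scratch
 * memory and a single page sized bounce buffer used on the decompress
 * side.  The list head lets the generic code in fs/btrfs/compression.c
 * keep a pool of idle workspaces instead of allocating a fresh one for
 * every request.
 */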
struct workspace {
	z_stream strm;
	char *buf;
	struct list_head list;
};

static void zlib_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	vfree(workspace->strm.workspace);
	kfree(workspace->buf);
	kfree(workspace);
}
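
/*
 * zlib needs a caller supplied scratch area whose size depends on the
 * window bits and memory level; sizing it for the larger of the deflate
 * and inflate requirements lets one workspace serve both directions.
 */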
static struct list_head *zlib_alloc_workspace(void)
{
	struct workspace *workspace;
	int workspacesize;

	workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());
	workspace->strm.workspace = vmalloc(workspacesize);
	workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
	if (!workspace->strm.workspace || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	zlib_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}
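
/*
 * Compress len bytes of the mapping, starting at offset start, into at
 * most nr_dest_pages freshly allocated pages.  On success the filled
 * page array, the page count and the in/out byte totals are handed back
 * to the caller; -E2BIG means the data did not shrink and should be
 * stored uncompressed instead.
 */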
static int zlib_compress_pages(struct list_head *ws,
			       struct address_space *mapping,
			       u64 start, unsigned long len,
			       struct page **pages,
			       unsigned long nr_dest_pages,
			       unsigned long *out_pages,
			       unsigned long *total_in,
			       unsigned long *total_out,
			       unsigned long max_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret;
	char *data_in;
	char *cpage_out;
	int nr_pages = 0;
	struct page *in_page = NULL;
	struct page *out_page = NULL;
	unsigned long bytes_left;

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) {
		printk(KERN_WARNING "BTRFS: deflateInit failed\n");
		ret = -EIO;
		goto out;
	}

	workspace->strm.total_in = 0;
	workspace->strm.total_out = 0;

	in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
	data_in = kmap(in_page);

	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	cpage_out = kmap(out_page);
	pages[0] = out_page;
	nr_pages = 1;

	workspace->strm.next_in = data_in;
	workspace->strm.next_out = cpage_out;
	workspace->strm.avail_out = PAGE_CACHE_SIZE;
	workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE);

	while (workspace->strm.total_in < len) {
		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
		if (ret != Z_OK) {
			printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
			       ret);
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		}

		/* we're making it bigger, give up */
		if (workspace->strm.total_in > 8192 &&
		    workspace->strm.total_in <
		    workspace->strm.total_out) {
			ret = -E2BIG;
			goto out;
		}
		/* we need another page for writing out.  Test this
		 * before the total_in so we will pull in a new page for
		 * the stream end if required
		 */
		if (workspace->strm.avail_out == 0) {
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_CACHE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
		/* we're all done */
		if (workspace->strm.total_in >= len)
			break;

		/* we've read in a full page, get a new one */
		if (workspace->strm.avail_in == 0) {
			if (workspace->strm.total_out > max_out)
				break;

			bytes_left = len - workspace->strm.total_in;
			kunmap(in_page);
			page_cache_release(in_page);

			start += PAGE_CACHE_SIZE;
			in_page = find_get_page(mapping,
						start >> PAGE_CACHE_SHIFT);
			data_in = kmap(in_page);
			workspace->strm.avail_in = min(bytes_left,
						       PAGE_CACHE_SIZE);
			workspace->strm.next_in = data_in;
		}
	}
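	/*
	 * All the input we are going to give zlib has been consumed (or we
	 * gave up early); calling deflate with Z_FINISH and no more input
	 * flushes anything still buffered and writes the stream trailer.
	 */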
	workspace->strm.avail_in = 0;
	ret = zlib_deflate(&workspace->strm, Z_FINISH);
	zlib_deflateEnd(&workspace->strm);

	if (ret != Z_STREAM_END) {
		ret = -EIO;
		goto out;
	}

	if (workspace->strm.total_out >= workspace->strm.total_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_out = workspace->strm.total_out;
	*total_in = workspace->strm.total_in;
out:
	*out_pages = nr_pages;
	if (out_page)
		kunmap(out_page);

	if (in_page) {
		kunmap(in_page);
		page_cache_release(in_page);
	}
	return ret;
}
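
/*
 * Decompress an entire compressed extent: pages_in holds srclen bytes of
 * zlib data whose logical disk start is disk_start, and the result is
 * copied into the pages of the bio described by bvec and vcnt.
 */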
static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
				  u64 disk_start,
				  struct bio_vec *bvec,
				  int vcnt,
				  size_t srclen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0, ret2;
	int wbits = MAX_WBITS;
	char *data_in;
	size_t total_out = 0;
	unsigned long page_in_index = 0;
	unsigned long page_out_index = 0;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
	unsigned long buf_start;
	unsigned long pg_offset;

	data_in = kmap(pages_in[page_in_index]);
	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
	workspace->strm.total_in = 0;

	workspace->strm.total_out = 0;
	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_CACHE_SIZE;
	pg_offset = 0;
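
	/*
	 * A zlib stream begins with two header bytes: CMF, with the method
	 * in the low nibble and the window size in the high nibble, and
	 * FLG, which carries the PRESET_DICT bit and makes CMF<<8 | FLG a
	 * multiple of 31.  Negative window bits below put zlib into raw
	 * deflate mode, so those two bytes are skipped here by hand.
	 */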
	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		printk(KERN_WARNING "BTRFS: inflateInit failed\n");
		return -EIO;
	}
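	/*
	 * Inflate srclen bytes of input, refilling next_in from the next
	 * input page whenever it runs dry.  Every chunk that lands in the
	 * page sized bounce buffer is handed to btrfs_decompress_buf2page(),
	 * which copies the portion covered by the bio into its pages and
	 * returns 0 once the bio is fully satisfied.
	 */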
	while (workspace->strm.total_in < srclen) {
		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		/* we didn't make progress in this inflate call, we're done */
		if (buf_start == total_out)
			break;

		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
						 total_out, disk_start,
						 bvec, vcnt,
						 &page_out_index, &pg_offset);
		if (ret2 == 0) {
			ret = 0;
			goto done;
		}

		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_CACHE_SIZE;

		if (workspace->strm.avail_in == 0) {
			unsigned long tmp;
			kunmap(pages_in[page_in_index]);
			page_in_index++;
			if (page_in_index >= total_pages_in) {
				data_in = NULL;
				break;
			}
			data_in = kmap(pages_in[page_in_index]);
			workspace->strm.next_in = data_in;
			tmp = srclen - workspace->strm.total_in;
			workspace->strm.avail_in = min(tmp,
						       PAGE_CACHE_SIZE);
		}
	}
	if (ret != Z_STREAM_END)
		ret = -EIO;
	else
		ret = 0;
done:
	zlib_inflateEnd(&workspace->strm);
	if (data_in)
		kunmap(pages_in[page_in_index]);
	if (!ret)
		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
	return ret;
}
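
/*
 * Decompress srclen bytes from data_in into a single destination page,
 * starting start_byte bytes into the uncompressed stream.  Used for
 * reads that only need one page worth of data, such as compressed
 * inline extents.
 */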
static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
			   struct page *dest_page,
			   unsigned long start_byte,
			   size_t srclen, size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0;
	int wbits = MAX_WBITS;
	unsigned long bytes_left;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes_left = destlen;

	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = srclen;
	workspace->strm.total_in = 0;

	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = PAGE_CACHE_SIZE;
	workspace->strm.total_out = 0;
	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		printk(KERN_WARNING "BTRFS: inflateInit failed\n");
		return -EIO;
	}

	while (bytes_left > 0) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		if (total_out == buf_start) {
			ret = -EIO;
			break;
		}

		if (total_out <= start_byte)
			goto next;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min(PAGE_CACHE_SIZE - pg_offset,
			    PAGE_CACHE_SIZE - buf_offset);
		bytes = min(bytes, bytes_left);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
		bytes_left -= bytes;
next:
		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = PAGE_CACHE_SIZE;
	}

	if (ret != Z_STREAM_END && bytes_left != 0)
		ret = -EIO;
	else
		ret = 0;

	zlib_inflateEnd(&workspace->strm);

	/*
	 * this should only happen if zlib returned fewer bytes than we
	 * expected.  btrfs_get_block is responsible for zeroing from the
	 * end of the inline extent (destlen) to the end of the page
	 */
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}
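
/*
 * Callback table used by the generic compression code in
 * fs/btrfs/compression.c, which selects it whenever an extent or mount
 * option asks for zlib.
 */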
const struct btrfs_compress_op btrfs_zlib_compress = {
	.alloc_workspace	= zlib_alloc_workspace,
	.free_workspace		= zlib_free_workspace,
	.compress_pages		= zlib_compress_pages,
	.decompress_biovec	= zlib_decompress_biovec,
	.decompress		= zlib_decompress,
};