/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
35 void *buf
; /* where decompressed data goes */
36 void *cbuf
; /* where compressed data goes */
37 struct list_head list
;
40 static void lzo_free_workspace(struct list_head
*ws
)
42 struct workspace
*workspace
= list_entry(ws
, struct workspace
, list
);
44 kvfree(workspace
->buf
);
45 kvfree(workspace
->cbuf
);
46 kvfree(workspace
->mem
);
50 static struct list_head
*lzo_alloc_workspace(void)
52 struct workspace
*workspace
;
54 workspace
= kzalloc(sizeof(*workspace
), GFP_KERNEL
);
56 return ERR_PTR(-ENOMEM
);
58 workspace
->mem
= kvmalloc(LZO1X_MEM_COMPRESS
, GFP_KERNEL
);
59 workspace
->buf
= kvmalloc(lzo1x_worst_compress(PAGE_SIZE
), GFP_KERNEL
);
60 workspace
->cbuf
= kvmalloc(lzo1x_worst_compress(PAGE_SIZE
), GFP_KERNEL
);
61 if (!workspace
->mem
|| !workspace
->buf
|| !workspace
->cbuf
)
64 INIT_LIST_HEAD(&workspace
->list
);
66 return &workspace
->list
;
68 lzo_free_workspace(&workspace
->list
);
69 return ERR_PTR(-ENOMEM
);
72 static inline void write_compress_length(char *buf
, size_t len
)
76 dlen
= cpu_to_le32(len
);
77 memcpy(buf
, &dlen
, LZO_LEN
);
80 static inline size_t read_compress_length(const char *buf
)
84 memcpy(&dlen
, buf
, LZO_LEN
);
85 return le32_to_cpu(dlen
);
88 static int lzo_compress_pages(struct list_head
*ws
,
89 struct address_space
*mapping
,
92 unsigned long *out_pages
,
93 unsigned long *total_in
,
94 unsigned long *total_out
)
96 struct workspace
*workspace
= list_entry(ws
, struct workspace
, list
);
101 struct page
*in_page
= NULL
;
102 struct page
*out_page
= NULL
;
103 unsigned long bytes_left
;
104 unsigned long len
= *total_out
;
105 unsigned long nr_dest_pages
= *out_pages
;
106 const unsigned long max_out
= nr_dest_pages
* PAGE_SIZE
;
110 unsigned long tot_in
= 0;
111 unsigned long tot_out
= 0;
112 unsigned long pg_bytes_left
;
113 unsigned long out_offset
;
120 in_page
= find_get_page(mapping
, start
>> PAGE_SHIFT
);
121 data_in
= kmap(in_page
);
124 * store the size of all chunks of compressed data in
127 out_page
= alloc_page(GFP_NOFS
| __GFP_HIGHMEM
);
128 if (out_page
== NULL
) {
132 cpage_out
= kmap(out_page
);
133 out_offset
= LZO_LEN
;
137 pg_bytes_left
= PAGE_SIZE
- LZO_LEN
;
139 /* compress at most one page of data each time */
140 in_len
= min(len
, PAGE_SIZE
);
141 while (tot_in
< len
) {
142 ret
= lzo1x_1_compress(data_in
, in_len
, workspace
->cbuf
,
143 &out_len
, workspace
->mem
);
144 if (ret
!= LZO_E_OK
) {
145 pr_debug("BTRFS: lzo in loop returned %d\n",
151 /* store the size of this chunk of compressed data */
152 write_compress_length(cpage_out
+ out_offset
, out_len
);
154 out_offset
+= LZO_LEN
;
155 pg_bytes_left
-= LZO_LEN
;
160 /* copy bytes from the working buffer into the pages */
161 buf
= workspace
->cbuf
;
163 bytes
= min_t(unsigned long, pg_bytes_left
, out_len
);
165 memcpy(cpage_out
+ out_offset
, buf
, bytes
);
168 pg_bytes_left
-= bytes
;
173 * we need another page for writing out.
175 * Note if there's less than 4 bytes left, we just
176 * skip to a new page.
178 if ((out_len
== 0 && pg_bytes_left
< LZO_LEN
) ||
179 pg_bytes_left
== 0) {
181 memset(cpage_out
+ out_offset
, 0,
183 tot_out
+= pg_bytes_left
;
186 /* we're done, don't allocate new page */
187 if (out_len
== 0 && tot_in
>= len
)
191 if (nr_pages
== nr_dest_pages
) {
197 out_page
= alloc_page(GFP_NOFS
| __GFP_HIGHMEM
);
198 if (out_page
== NULL
) {
202 cpage_out
= kmap(out_page
);
203 pages
[nr_pages
++] = out_page
;
205 pg_bytes_left
= PAGE_SIZE
;
210 /* we're making it bigger, give up */
211 if (tot_in
> 8192 && tot_in
< tot_out
) {
220 if (tot_out
> max_out
)
223 bytes_left
= len
- tot_in
;
228 in_page
= find_get_page(mapping
, start
>> PAGE_SHIFT
);
229 data_in
= kmap(in_page
);
230 in_len
= min(bytes_left
, PAGE_SIZE
);
233 if (tot_out
>= tot_in
) {
238 /* store the size of all chunks of compressed data */
239 cpage_out
= kmap(pages
[0]);
240 write_compress_length(cpage_out
, tot_out
);
245 *total_out
= tot_out
;
248 *out_pages
= nr_pages
;
260 static int lzo_decompress_bio(struct list_head
*ws
, struct compressed_bio
*cb
)
262 struct workspace
*workspace
= list_entry(ws
, struct workspace
, list
);
265 unsigned long page_in_index
= 0;
266 size_t srclen
= cb
->compressed_len
;
267 unsigned long total_pages_in
= DIV_ROUND_UP(srclen
, PAGE_SIZE
);
268 unsigned long buf_start
;
269 unsigned long buf_offset
= 0;
271 unsigned long working_bytes
;
274 unsigned long in_offset
;
275 unsigned long in_page_bytes_left
;
276 unsigned long tot_in
;
277 unsigned long tot_out
;
278 unsigned long tot_len
;
280 bool may_late_unmap
, need_unmap
;
281 struct page
**pages_in
= cb
->compressed_pages
;
282 u64 disk_start
= cb
->start
;
283 struct bio
*orig_bio
= cb
->orig_bio
;
285 data_in
= kmap(pages_in
[0]);
286 tot_len
= read_compress_length(data_in
);
290 tot_len
= min_t(size_t, srclen
, tot_len
);
291 in_page_bytes_left
= PAGE_SIZE
- LZO_LEN
;
295 while (tot_in
< tot_len
) {
296 in_len
= read_compress_length(data_in
+ in_offset
);
297 in_page_bytes_left
-= LZO_LEN
;
298 in_offset
+= LZO_LEN
;
302 working_bytes
= in_len
;
303 may_late_unmap
= need_unmap
= false;
305 /* fast path: avoid using the working buffer */
306 if (in_page_bytes_left
>= in_len
) {
307 buf
= data_in
+ in_offset
;
309 may_late_unmap
= true;
313 /* copy bytes from the pages into the working buffer */
314 buf
= workspace
->cbuf
;
316 while (working_bytes
) {
317 bytes
= min(working_bytes
, in_page_bytes_left
);
319 memcpy(buf
+ buf_offset
, data_in
+ in_offset
, bytes
);
322 working_bytes
-= bytes
;
323 in_page_bytes_left
-= bytes
;
326 /* check if we need to pick another page */
327 if ((working_bytes
== 0 && in_page_bytes_left
< LZO_LEN
)
328 || in_page_bytes_left
== 0) {
329 tot_in
+= in_page_bytes_left
;
331 if (working_bytes
== 0 && tot_in
>= tot_len
)
334 if (page_in_index
+ 1 >= total_pages_in
) {
342 kunmap(pages_in
[page_in_index
]);
344 data_in
= kmap(pages_in
[++page_in_index
]);
346 in_page_bytes_left
= PAGE_SIZE
;
351 out_len
= lzo1x_worst_compress(PAGE_SIZE
);
352 ret
= lzo1x_decompress_safe(buf
, in_len
, workspace
->buf
,
355 kunmap(pages_in
[page_in_index
- 1]);
356 if (ret
!= LZO_E_OK
) {
357 pr_warn("BTRFS: decompress failed\n");
365 ret2
= btrfs_decompress_buf2page(workspace
->buf
, buf_start
,
366 tot_out
, disk_start
, orig_bio
);
371 kunmap(pages_in
[page_in_index
]);
373 zero_fill_bio(orig_bio
);
377 static int lzo_decompress(struct list_head
*ws
, unsigned char *data_in
,
378 struct page
*dest_page
,
379 unsigned long start_byte
,
380 size_t srclen
, size_t destlen
)
382 struct workspace
*workspace
= list_entry(ws
, struct workspace
, list
);
390 BUG_ON(srclen
< LZO_LEN
);
392 tot_len
= read_compress_length(data_in
);
395 in_len
= read_compress_length(data_in
);
399 ret
= lzo1x_decompress_safe(data_in
, in_len
, workspace
->buf
, &out_len
);
400 if (ret
!= LZO_E_OK
) {
401 pr_warn("BTRFS: decompress failed!\n");
406 if (out_len
< start_byte
) {
412 * the caller is already checking against PAGE_SIZE, but lets
413 * move this check closer to the memcpy/memset
415 destlen
= min_t(unsigned long, destlen
, PAGE_SIZE
);
416 bytes
= min_t(unsigned long, destlen
, out_len
- start_byte
);
418 kaddr
= kmap_atomic(dest_page
);
419 memcpy(kaddr
, workspace
->buf
+ start_byte
, bytes
);
422 * btrfs_getblock is doing a zero on the tail of the page too,
423 * but this will cover anything missing from the decompressed
427 memset(kaddr
+bytes
, 0, destlen
-bytes
);
428 kunmap_atomic(kaddr
);
433 const struct btrfs_compress_op btrfs_lzo_compress
= {
434 .alloc_workspace
= lzo_alloc_workspace
,
435 .free_workspace
= lzo_free_workspace
,
436 .compress_pages
= lzo_compress_pages
,
437 .decompress_bio
= lzo_decompress_bio
,
438 .decompress
= lzo_decompress
,