/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2001 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 *   USA; either version 2 of the License, or (at your option) any later
 *   version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * linux/fs/isofs/compress.c
 *
 * Transparent decompression of files on an iso9660 filesystem
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/vmalloc.h>
#include <linux/zlib.h>

#include "isofs.h"
#include "zisofs.h"
/* This should probably be global. */
static char zisofs_sink_page[PAGE_CACHE_SIZE];
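/*
 * Decompressed data for pages we could not get into the page cache is
 * written into this sink page and simply discarded (the readpage loop
 * below points stream.next_out here whenever pages[fpage] is NULL).
 */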
/*
 * This contains the zlib memory allocation and the mutex for the
 * allocation; this avoids failures at block-decompression time.
 */
static void *zisofs_zlib_workspace;
static struct semaphore zisofs_zlib_semaphore;
/*
 * When decompressing, we typically obtain more than one page
 * per reference.  We inject the additional pages into the page
 * cache as a form of readahead.
 */
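/*
 * Only the page actually passed in (the "critical" page, xpage below)
 * determines the return value; the sibling pages of the same zisofs
 * block are opportunistic readahead and failures on them are not
 * reported to the caller.
 */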
static int zisofs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned int maxpage, xpage, fpage, blockindex;
	unsigned long offset;
	unsigned long blockptr, blockendptr, cstart, cend, csize;
	struct buffer_head *bh, *ptrbh[2];
	unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
	unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
	unsigned long bufmask = bufsize - 1;
	int err = -EIO;
	int i;
	unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
	unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
	/* unsigned long zisofs_block_size = 1UL << zisofs_block_shift; */
	unsigned int zisofs_block_page_shift = zisofs_block_shift-PAGE_CACHE_SHIFT;
	unsigned long zisofs_block_pages = 1UL << zisofs_block_page_shift;
	unsigned long zisofs_block_page_mask = zisofs_block_pages-1;
	struct page *pages[zisofs_block_pages];
	unsigned long index = page->index;
	int indexblocks;
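	/*
	 * Worked example (values here are illustrative, not required by
	 * the format): with 32KB zisofs blocks (zisofs_block_shift = 15)
	 * and 4KB pages (PAGE_CACHE_SHIFT = 12), zisofs_block_page_shift
	 * is 3, so each compressed block expands into 8 pages and pages[]
	 * has 8 slots.
	 */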
	/* We have already been given one page, this is the one
	   we must do. */
	xpage = index & zisofs_block_page_mask;
	pages[xpage] = page;
	/* The remaining pages need to be allocated and inserted */
	offset = index & ~zisofs_block_page_mask;
	blockindex = offset >> zisofs_block_page_shift;
	maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	maxpage = min(zisofs_block_pages, maxpage-offset);
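	/* The min() above clamps to the number of pages left in the file,
	   so we never touch pages beyond EOF. */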
	for ( i = 0 ; i < maxpage ; i++, offset++ ) {
		if ( i != xpage ) {
			pages[i] = grab_cache_page_nowait(mapping, offset);
		}
		page = pages[i];
		if ( page ) {
			ClearPageError(page);
			kmap(page);
		}
	}
	/* This is the last page filled, plus one; used in case of abort. */
	fpage = 0;
	/* Find the pointer to this specific chunk */
	/* Note: we're not using isonum_731() here because the data is known aligned */
	/* Note: header_size is in 32-bit words (4 bytes) */
	blockptr = (header_size + blockindex) << 2;
	blockendptr = blockptr + 4;
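	/*
	 * Layout note (as implied by the reads below): after the zisofs
	 * header the file carries an array of little-endian 32-bit offsets,
	 * one per compressed block plus a final end-of-data entry, so that
	 * roughly cstart = ptr[blockindex] and cend = ptr[blockindex + 1]
	 * bracket this block's compressed bytes.
	 */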
	indexblocks = ((blockptr^blockendptr) >> bufshift) ? 2 : 1;
	ptrbh[0] = ptrbh[1] = NULL;
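	/* The two 4-byte pointers may straddle a device block boundary, in
	   which case they have to be read from two separate buffers. */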
	if ( isofs_get_blocks(inode, blockptr >> bufshift, ptrbh, indexblocks) != indexblocks ) {
		if ( ptrbh[0] ) brelse(ptrbh[0]);
		printk(KERN_DEBUG "zisofs: Null buffer on reading block table, inode = %lu, block = %lu\n",
		       inode->i_ino, blockptr >> bufshift);
		goto eio;
	}
	ll_rw_block(READ, indexblocks, ptrbh);
	bh = ptrbh[0];
	if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
		printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
		       inode->i_ino, blockptr >> bufshift);
		if ( ptrbh[1] )
			brelse(ptrbh[1]);
		goto eio;
	}
	cstart = le32_to_cpu(*(__le32 *)(bh->b_data + (blockptr & bufmask)));
	if ( indexblocks == 2 ) {
		/* We just crossed a block boundary.  Switch to the next block */
		brelse(bh);
		bh = ptrbh[1];
		if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
			printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
			       inode->i_ino, blockendptr >> bufshift);
			goto eio;
		}
	}
	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
	brelse(bh);
	if (cstart > cend)
		goto eio;

	csize = cend-cstart;

	if (csize > deflateBound(1UL << zisofs_block_shift))
		goto eio;
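	/* Sanity check above: a compressed chunk can never legitimately be
	   larger than the worst-case deflate expansion of one uncompressed
	   zisofs block, so anything bigger indicates a corrupt block table. */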
	/* Now page[] contains an array of pages, any of which can be NULL,
	   and the locks on which we hold.  We should now read the data and
	   release the pages.  If the pages are NULL the decompressed data
	   for that particular page should be discarded. */
	if ( csize == 0 ) {
		/* This data block is empty. */

		for ( fpage = 0 ; fpage < maxpage ; fpage++ ) {
			if ( (page = pages[fpage]) != NULL ) {
				memset(page_address(page), 0, PAGE_CACHE_SIZE);

				flush_dcache_page(page);
				SetPageUptodate(page);
				kunmap(page);
				unlock_page(page);
				if ( fpage == xpage )
					err = 0; /* The critical page */
				else
					page_cache_release(page);
			}
		}
	} else {
		/* This data block is compressed. */
		z_stream stream;
		int bail = 0, left_out = -1;
		int zerr;
		int needblocks = (csize + (cstart & bufmask) + bufmask) >> bufshift;
		int haveblocks;
		struct buffer_head *bhs[needblocks+1];
		struct buffer_head **bhptr;
		/* Because zlib is not thread-safe, do all the I/O at the top. */

		blockptr = cstart >> bufshift;
		memset(bhs, 0, (needblocks+1)*sizeof(struct buffer_head *));
		haveblocks = isofs_get_blocks(inode, blockptr, bhs, needblocks);
		ll_rw_block(READ, haveblocks, bhs);

		bhptr = &bhs[0];
		bh = *bhptr++;
		/* First block is special since it may be fractional.
		   We also wait for it before grabbing the zlib
		   semaphore; odds are that the subsequent blocks are
		   going to come in in short order so we don't hold
		   the zlib semaphore longer than necessary. */
		if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
			printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
			       fpage, xpage, csize);
			goto b_eio;
		}
		stream.next_in  = bh->b_data + (cstart & bufmask);
		stream.avail_in = min(bufsize-(cstart & bufmask), csize);
		csize -= stream.avail_in;
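		/* cstart usually points into the middle of the first device
		   block, so only the tail of that buffer belongs to this
		   chunk; csize now tracks the compressed bytes still to be
		   fed in from the remaining buffers. */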
		stream.workspace = zisofs_zlib_workspace;
		down(&zisofs_zlib_semaphore);
		zerr = zlib_inflateInit(&stream);
		if ( zerr != Z_OK ) {
			if ( err && zerr == Z_MEM_ERROR )
				err = -ENOMEM;
			printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
			       zerr);
			goto z_eio;
		}
		while ( !bail && fpage < maxpage ) {
			page = pages[fpage];
			if ( page )
				stream.next_out = page_address(page);
			else
				stream.next_out = (void *)&zisofs_sink_page;
			stream.avail_out = PAGE_CACHE_SIZE;
			while ( stream.avail_out ) {
				int ao, ai;
				if ( stream.avail_in == 0 && left_out ) {
					if ( !csize ) {
						printk(KERN_WARNING "zisofs: ZF read beyond end of input\n");
						bail = 1;
						break;
					} else {
						bh = *bhptr++;
						if ( !bh ||
						     (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
							/* Reached an EIO */
							printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
							       fpage, xpage, csize);

							bail = 1;
							break;
						}

						stream.next_in = bh->b_data;
						stream.avail_in = min(csize,bufsize);
						csize -= stream.avail_in;
					}
				}
				ao = stream.avail_out;  ai = stream.avail_in;
				zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
				left_out = stream.avail_out;
				if ( zerr == Z_BUF_ERROR && stream.avail_in == 0 )
					continue;
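				/* (The Z_BUF_ERROR case above is not fatal:
				   with the input exhausted it just means zlib
				   wants more data, so we loop back and feed
				   in the next buffer.) */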
				if ( zerr != Z_OK ) {
					/* EOF, error, or trying to read beyond end of input */
					if ( err && zerr == Z_MEM_ERROR )
						err = -ENOMEM;
					if ( zerr != Z_STREAM_END )
						printk(KERN_DEBUG "zisofs: zisofs_inflate returned %d, inode = %lu, index = %lu, fpage = %d, xpage = %d, avail_in = %d, avail_out = %d, ai = %d, ao = %d\n",
						       zerr, inode->i_ino, index,
						       fpage, xpage,
						       stream.avail_in, stream.avail_out,
						       ai, ao);
					bail = 1;
					break;
				}
			}
			if ( stream.avail_out && zerr == Z_STREAM_END ) {
				/* Fractional page written before EOF.  This may
				   be the last page in the file. */
				memset(stream.next_out, 0, stream.avail_out);
				stream.avail_out = 0;
			}
			if ( !stream.avail_out ) {
				/* This page completed */
				if ( page ) {
					flush_dcache_page(page);
					SetPageUptodate(page);
					kunmap(page);
					unlock_page(page);
					if ( fpage == xpage )
						err = 0; /* The critical page */
					else
						page_cache_release(page);
				}
				fpage++;
			}
		}
		zlib_inflateEnd(&stream);
	z_eio:
		up(&zisofs_zlib_semaphore);

	b_eio:
		for ( i = 0 ; i < haveblocks ; i++ ) {
			if ( bhs[i] )
				brelse(bhs[i]);
		}
	}

eio:
	/* Release any residual pages, do not SetPageUptodate */
	while ( fpage < maxpage ) {
		page = pages[fpage];
		if ( page ) {
			flush_dcache_page(page);
			if ( fpage == xpage )
				SetPageError(page);
			kunmap(page);
			unlock_page(page);
			if ( fpage != xpage )
				page_cache_release(page);
		}
		fpage++;
	}

	/* At this point, err contains 0 or -EIO depending on the "critical" page */
	return err;
}
struct address_space_operations zisofs_aops = {
	.readpage = zisofs_readpage,
	/* No sync_page operation supported? */
	/* No bmap operation supported */
};

static int initialized;
int __init zisofs_init(void)
{
	if ( initialized ) {
		printk("zisofs_init: called more than once\n");
		return 0;
	}

	zisofs_zlib_workspace = vmalloc(zlib_inflate_workspacesize());
	if ( !zisofs_zlib_workspace )
		return -ENOMEM;
	init_MUTEX(&zisofs_zlib_semaphore);

	initialized = 1;
	return 0;
}
void zisofs_cleanup(void)
{
	if ( !initialized ) {
		printk("zisofs_cleanup: called without initialization\n");
		return;
	}

	vfree(zisofs_zlib_workspace);
	initialized = 0;
}
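/*
 * Nothing in this file installs zisofs_aops or calls zisofs_init(); the
 * rest of the isofs code is expected to point a compressed inode's
 * i_data.a_ops at zisofs_aops (presumably where the ZF Rock Ridge field
 * is parsed) and to call zisofs_init()/zisofs_cleanup() from the
 * filesystem's module init and exit paths.
 */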