/*
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"

static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page, int bytes);
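
/*
 * Two paths follow: the common case decompresses the Squashfs block
 * directly into the page cache pages via a "page actor"; if any page in
 * the block's range cannot be grabbed, squashfs_read_cache() falls back
 * to decompressing into an intermediate buffer and copying out of it.
 */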

/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
	int expected)
{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;
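
	/*
	 * A Squashfs block covers 1 << (block_log - PAGE_SHIFT) page
	 * cache pages; start_index and end_index delimit the range of
	 * page indexes spanned by the block containing target_page.
	 */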
	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;
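
	/*
	 * Only the file's final block can be cut short by i_size; for all
	 * other blocks the clamp above is a no-op and pages is the full
	 * per-block page count.
	 */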

	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
	if (page == NULL)
		return res;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(page, pages, 0);
	if (actor == NULL)
		goto out;
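
	/*
	 * The actor hands the decompressor one mapped page at a time, so
	 * the block can be decompressed straight into the page cache
	 * without an intermediate buffer.
	 */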

	/* Try to grab all the pages covered by the Squashfs block */
	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

		if (page[i] == NULL) {
			missing_pages++;
			continue;
		}

		if (PageUptodate(page[i])) {
			/*
			 * Page is already uptodate: don't overwrite it.
			 * Release it and treat it as missing, forcing the
			 * intermediate-buffer fallback below.
			 */
			unlock_page(page[i]);
			put_page(page[i]);
			page[i] = NULL;
			missing_pages++;
		}
	}

	if (missing_pages) {
		/*
		 * Couldn't get one or more pages, this page has either
		 * been VM reclaimed, but others are still in the page cache
		 * and uptodate, or we're racing with another thread in
		 * squashfs_readpage also trying to grab them.  Fall back to
		 * using an intermediate buffer.
		 */
		res = squashfs_read_cache(target_page, block, bsize, pages,
							page, expected);
		if (res < 0)
			goto mark_errored;

		goto out;
	}

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
	if (res < 0)
		goto mark_errored;
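
	/*
	 * On success squashfs_read_data() returns the number of bytes
	 * decompressed; anything other than the expected length is
	 * treated as an I/O error.
	 */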
	if (res != expected) {
		res = -EIO;
		goto mark_errored;
	}

	/* Last page may have trailing bytes not filled */
	bytes = res % PAGE_SIZE;
	if (bytes) {
		pageaddr = kmap_atomic(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_atomic(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			put_page(page[i]);
	}

	kfree(actor);
	kfree(page);

	return 0;

mark_errored:
	/* Decompression failed, mark pages as errored.  Target_page is
	 * dealt with by the caller
	 */
	for (i = 0; i < pages; i++) {
		if (page[i] == NULL || page[i] == target_page)
			continue;
		flush_dcache_page(page[i]);
		SetPageError(page[i]);
		unlock_page(page[i]);
		put_page(page[i]);
	}

out:
	kfree(actor);
	kfree(page);
	return res;
}
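
/*
 * Fallback path: read and decompress the whole block into the Squashfs
 * block cache, then copy the relevant bytes into whichever page cache
 * pages were successfully grabbed (NULL entries in the array are skipped).
 */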
static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page, int bytes)
{
	struct inode *i = target_page->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
						block, bsize);
	int res = buffer->error, n, offset = 0;

	if (res) {
		ERROR("Unable to read page, block %llx, size %x\n", block,
			bsize);
		goto out;
	}

	for (n = 0; n < pages && bytes > 0; n++,
			bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
		int avail = min_t(int, bytes, PAGE_SIZE);

		/* Pages that could not be grabbed earlier are NULL */
		if (page[n] == NULL)
			continue;

		squashfs_fill_page(page[n], buffer, offset, avail);
		unlock_page(page[n]);
		if (page[n] != target_page)
			put_page(page[n]);
	}

out:
	squashfs_cache_put(buffer);
	return res;
}
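
/*
 * Note on the caller contract (an assumption, inferred from the
 * mark_errored comment above): squashfs_readpage() passes target_page
 * in locked, and on error it unlocks and releases target_page itself;
 * these functions only clean up the extra pages they grabbed.
 */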