/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

static int afs_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static int afs_launder_page(struct page *page);

static int afs_readpages(struct file *filp, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages);

const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.flush		= afs_flush,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};

const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
};

const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.readpages	= afs_readpages,
	.set_page_dirty	= afs_set_page_dirty,
	.launder_page	= afs_launder_page,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepage	= afs_writepage,
	.writepages	= afs_writepages,
};

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct key *key;
	int ret;

	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		return PTR_ERR(key);
	}

	ret = afs_validate(vnode, key);
	if (ret < 0) {
		_leave(" = %d [val]", ret);
		return ret;
	}

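	/* Cache the key in the file struct so that subsequent reads and
	 * writes through this file can find it again. */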
	file->private_data = key;
	_leave(" = 0");
	return 0;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);

	key_put(file->private_data);
	_leave(" = 0");
	return 0;
}

/*
 * Dispose of a ref to a read record.
 */
void afs_put_read(struct afs_read *req)
{
	int i;

	if (atomic_dec_and_test(&req->usage)) {
		for (i = 0; i < req->nr_pages; i++)
			if (req->pages[i])
				put_page(req->pages[i]);
		kfree(req);
	}
}

#ifdef CONFIG_AFS_FSCACHE
/*
 * deal with notification that a page was read from the cache
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif

/*
 * read page from file, directory or symlink, given a key to use
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_read *req;
	struct key *key = data;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

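	/* The caller holds the page lock; it is dropped on every exit path
	 * once the read has completed or failed. */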
	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");
	default:
	go_on:
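		/* The page pointer array sits at the end of the afs_read
		 * record; here there is room for just this one page. */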
		req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
			      GFP_KERNEL);
		if (!req)
			goto enomem;

		/* We request a full page.  If the page is a partial one at the
		 * end of the file, the server will return a short read and the
		 * unmarshalling code will clear the unfilled space.
		 */
		atomic_set(&req->usage, 1);
		req->pos = (loff_t)page->index << PAGE_SHIFT;
		req->len = PAGE_SIZE;
		req->nr_pages = 1;
		req->pages[0] = page;
		get_page(page);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, key, req);
		afs_put_read(req);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));

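			/* Transient failures are passed back without marking
			 * the page in error so that the read can simply be
			 * retried; anything else counts as an I/O error on
			 * the page. */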
			if (ret == -EINTR ||
			    ret == -ENOMEM ||
			    ret == -ERESTARTSYS ||
			    ret == -EAGAIN)
				goto error;
			goto io_error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

io_error:
	SetPageError(page);
	goto error;
enomem:
	ret = -ENOMEM;
error:
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * read page from file, directory or symlink, given a file to nominate the key
 * to be used
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct key *key;
	int ret;

	if (file) {
		key = file->private_data;
		ASSERT(key != NULL);
		ret = afs_page_filler(key, page);
	} else {
		struct inode *inode = page->mapping->host;
		key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
		if (IS_ERR(key)) {
			ret = PTR_ERR(key);
		} else {
			ret = afs_page_filler(key, page);
			key_put(key);
		}
	}
	return ret;
}

/*
 * Make pages available as they're filled.
 */
static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
	struct afs_vnode *vnode = call->reply;
#endif
	struct page *page = req->pages[req->index];

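	/* Clear the slot so that afs_put_read() doesn't drop the page ref a
	 * second time; it is dropped below once the page has been unlocked. */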
	req->pages[req->index] = NULL;
	SetPageUptodate(page);

	/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
		fscache_uncache_page(vnode->cache, page);
		BUG_ON(PageFsCache(page));
	}
#endif
	unlock_page(page);
	put_page(page);
}

/*
 * Read a contiguous set of pages.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
			     struct list_head *pages)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_read *req;
	struct list_head *p;
	struct page *first, *page;
	struct key *key = file->private_data;
	pgoff_t index;
	int ret, n, i;

	/* Count the number of contiguous pages at the front of the list.  Note
	 * that the list goes prev-wards rather than next-wards.
	 */
	first = list_entry(pages->prev, struct page, lru);
	index = first->index + 1;
	n = 1;
	for (p = first->lru.prev; p != pages; p = p->prev) {
		page = list_entry(p, struct page, lru);
		if (page->index != index)
			break;
		index++;
		n++;
	}

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
		      GFP_NOFS);
	if (!req)
		return -ENOMEM;

	atomic_set(&req->usage, 1);
	req->page_done = afs_readpages_page_done;
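	/* Convert the first page's index into a byte position in the file. */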
	req->pos = first->index;
	req->pos <<= PAGE_SHIFT;

	/* Transfer the pages to the request.  We add them in until one fails
	 * to add to the LRU and then we stop (as that'll make a hole in the
	 * contiguous run).
	 *
	 * Note that it's possible for the file size to change whilst we're
	 * doing this, but we rely on the server returning less than we asked
	 * for if the file shrank.  We also rely on this to deal with a partial
	 * page at the end of the file.
	 */
	do {
		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);
		index = page->index;
		if (add_to_page_cache_lru(page, mapping, index,
					  readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			put_page(page);
			break;
		}

		req->pages[req->nr_pages++] = page;
		req->len += PAGE_SIZE;
	} while (req->nr_pages < n);

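	/* If not even the first page could be added to the page cache, there
	 * is nothing to fetch from the server; report success so that the
	 * caller can carry on with the rest of the list. */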
	if (req->nr_pages == 0) {
		kfree(req);
		return 0;
	}

	ret = afs_vnode_fetch_data(vnode, key, req);
	if (ret < 0)
		goto error;

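	/* Charge the data read against the calling task for I/O accounting. */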
	task_io_account_read(PAGE_SIZE * req->nr_pages);
	afs_put_read(req);
	return 0;

error:
	if (ret == -ENOENT) {
		_debug("got NOENT from server"
		       " - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	for (i = 0; i < req->nr_pages; i++) {
		page = req->pages[i];
		if (page) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			SetPageError(page);
			unlock_page(page);
		}
	}

	afs_put_read(req);
	return ret;
}

/*
 * read a set of pages
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	struct key *key = file->private_data;
	struct afs_vnode *vnode;
	int ret = 0;

	_enter("{%d},{%lu},,%d",
	       key_serial(key), mapping->host->i_ino, nr_pages);

	ASSERT(key != NULL);

	vnode = AFS_FS_I(mapping->host);
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_pages(vnode->cache,
					  mapping,
					  pages,
					  &nr_pages,
					  afs_file_readpage_read_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* all pages are being read from the cache */
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(nr_pages != 0);
		_leave(" = 0 [reading all]");
		return 0;

		/* there were pages that couldn't be read from the cache */
	case -ENODATA:
	case -ENOBUFS:
		break;

		/* other error */
	default:
		_leave(" = %d", ret);
		return ret;
	}

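	/* Whatever the cache couldn't supply has to be fetched from the
	 * server, one contiguous run of pages at a time. */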
	while (!list_empty(pages)) {
		ret = afs_readpages_one(file, mapping, pages);
		if (ret < 0)
			break;
	}

	_leave(" = %d [netting]", ret);
	return ret;
}

/*
 * write back a dirty page
 */
static int afs_launder_page(struct page *page)
{
	_enter("{%lu}", page->index);

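	/* Nothing is written back here at present; the call is just traced
	 * and success reported. */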
	return 0;
}

/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);

	_enter("{%lu},%u,%u", page->index, offset, length);

	BUG_ON(!PageLocked(page));

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0 && length == PAGE_SIZE) {
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
			fscache_wait_on_page_write(vnode->cache, page);
			fscache_uncache_page(vnode->cache, page);
		}
#endif

		if (PagePrivate(page)) {
			if (wb && !PageWriteback(page)) {
				set_page_private(page, 0);
				afs_put_writeback(wb);
			}

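			/* Only clear PG_private once the writeback record has
			 * actually been detached; a page still under writeback
			 * keeps its record for now. */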
			if (!page_private(page))
				ClearPagePrivate(page);
		}
	}

	_leave("");
}

/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	if (PagePrivate(page)) {
		if (wb) {
			set_page_private(page, 0);
			afs_put_writeback(wb);
		}
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}