/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
};

static struct backing_dev_info swap_backing_dev_info = {
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = swap_unplug_io_fn,
};

struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = RW_LOCK_UNLOCKED,
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};
EXPORT_SYMBOL(swapper_space);

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

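/*
 * For example, INC_CACHE_INFO(add_total) expands to
 * "do { swap_cache_info.add_total++; } while (0)"; the do/while
 * wrapper makes the expansion a single statement, so it stays safe
 * after an unbraced "if".  The counters are plain unsigned longs,
 * not atomics: an occasionally lost update is tolerable for what is
 * only debug statistics.
 */
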
static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
        unsigned long noent_race;
        unsigned long exist_race;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total,
                swap_cache_info.noent_race, swap_cache_info.exist_race);
        printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
                               int gfp_mask)
{
        int error;

        BUG_ON(PageSwapCache(page));
        BUG_ON(PagePrivate(page));
        error = radix_tree_preload(gfp_mask);
        if (!error) {
                write_lock_irq(&swapper_space.tree_lock);
                error = radix_tree_insert(&swapper_space.page_tree,
                                                entry.val, page);
                if (!error) {
                        page_cache_get(page);
                        SetPageLocked(page);
                        SetPageSwapCache(page);
                        page->private = entry.val;
                        total_swapcache_pages++;
                        pagecache_acct(1);
                }
                write_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();
        }
        return error;
}

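/*
 * Note on the pattern above: radix_tree_preload() pre-allocates the
 * radix-tree nodes that radix_tree_insert() may need, so that the
 * insertion itself can run under the irq-safe tree_lock without
 * sleeping; radix_tree_preload_end() re-enables preemption.
 */
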
static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;

        if (!swap_duplicate(entry)) {
                INC_CACHE_INFO(noent_race);
                return -ENOENT;
        }
        error = __add_to_swap_cache(page, entry, GFP_KERNEL);
        /*
         * Anon pages are already on the LRU, we don't run lru_cache_add here.
         */
        if (error) {
                swap_free(entry);
                if (error == -EEXIST)
                        INC_CACHE_INFO(exist_race);
                return error;
        }
        INC_CACHE_INFO(add_total);
        return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        BUG_ON(!PageLocked(page));
        BUG_ON(!PageSwapCache(page));
        BUG_ON(PageWriteback(page));
        BUG_ON(PagePrivate(page));

        radix_tree_delete(&swapper_space.page_tree, page->private);
        page->private = 0;
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        pagecache_acct(-1);
        INC_CACHE_INFO(del_total);
}

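/*
 * Note: __delete_from_swap_cache() above leaves locking to its caller;
 * delete_from_swap_cache() below shows the canonical usage, taking
 * swapper_space.tree_lock around the call and dropping the swap entry
 * and the page reference afterwards.
 */
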
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        BUG_ON(!PageLocked(page));

        for (;;) {
                entry = get_swap_page();
                if (!entry.val)
                        return 0;

                /*
                 * Radix-tree node allocations from PF_MEMALLOC contexts could
                 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
                 * stops emergency reserves from being allocated.
                 *
                 * TODO: this could cause a theoretical memory reclaim
                 * deadlock in the swap out path.
                 */
                /*
                 * Add it to the swap cache and mark it dirty.
                 */
                err = __add_to_swap_cache(page, entry,
                                GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);

                switch (err) {
                case 0:                         /* Success */
                        SetPageUptodate(page);
                        SetPageDirty(page);
                        INC_CACHE_INFO(add_total);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
                        INC_CACHE_INFO(exist_race);
                        swap_free(entry);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
                        swap_free(entry);
                        return 0;
                }
        }
}

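/*
 * Illustrative caller (a sketch, not verbatim kernel code): page
 * reclaim in mm/vmscan.c puts anonymous pages into swap roughly like
 * this before trying to unmap and write them out:
 *
 *      if (PageAnon(page) && !PageSwapCache(page)) {
 *              if (!add_to_swap(page))
 *                      goto activate_locked;   // no swap space left
 *      }
 */
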
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page->private;

        write_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        write_unlock_irq(&swapper_space.tree_lock);

        swap_free(entry);
        page_cache_release(page);
}

/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
        if (!err) {
                remove_from_page_cache(page);
                page_cache_release(page);       /* pagecache ref */
                if (!swap_duplicate(entry))
                        BUG();
                SetPageDirty(page);
                INC_CACHE_INFO(add_total);
        } else if (err == -EEXIST)
                INC_CACHE_INFO(exist_race);
        return err;
}

/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
                struct address_space *mapping)
{
        int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
        if (!err) {
                delete_from_swap_cache(page);
                /* shift page from clean_pages to dirty_pages list */
                ClearPageDirty(page);
                set_page_dirty(page);
        }
        return err;
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * exclusive_swap_page() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !TestSetPageLocked(page)) {
                remove_exclusive_swap_page(page);
                unlock_page(page);
        }
}

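/*
 * TestSetPageLocked() above is a trylock: if someone else holds the
 * page lock we simply skip the page instead of sleeping, since our
 * callers may be holding spinlocks (see free_page_and_swap_cache()
 * below, which runs under page_table_lock).
 */
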
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page. Can not do a lock_page,
 * as we are holding the page_table_lock spinlock.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        int chunk = 16;
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(chunk, nr);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}

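/*
 * The loop above works in batches of 16 pages; each release_pages()
 * call may take the zone lru_lock with interrupts disabled, so the
 * batching keeps the lock hold time bounded on large arrays.
 */
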
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Associate the page with swap entry in the swap cache.
                 * May fail (-ENOENT) if swap entry has been freed since
                 * our caller observed it.  May fail (-EEXIST) if there
                 * is already a page associated with this entry in the
                 * swap cache: added by a racing read_swap_cache_async,
                 * or by try_to_swap_out (or shmem_writepage) re-using
                 * the just freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 */
                err = add_to_swap_cache(new_page, entry);
                if (!err) {
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_active(new_page);
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
        } while (err != -ENOENT && err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

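/*
 * Illustrative caller (a sketch, not verbatim kernel code): the swap
 * fault path in mm/memory.c resolves a swapped-out PTE roughly like
 * this:
 *
 *      page = lookup_swap_cache(entry);
 *      if (!page)
 *              page = read_swap_cache_async(entry, vma, address);
 *      if (!page)
 *              // either OOM, or the entry was freed under us
 */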