/*
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * linux/Documentation/sysctl/vm.txt.
 *
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/swapctl.h>
20 #include <linux/pagemap.h>
21 #include <linux/init.h>
24 #include <asm/uaccess.h> /* for copy_to/from_user */
25 #include <asm/pgtable.h>
28 * We identify three levels of free memory. We never let free mem
29 * fall below the freepages.min except for atomic allocations. We
30 * start background swapping if we fall below freepages.high free
31 * pages, and we begin intensive swapping below freepages.low.
33 * Actual initialization is done in mm/page_alloc.c or
34 * arch/sparc(64)/mm/init.c.
36 freepages_t freepages
= {
37 0, /* freepages.min */
38 0, /* freepages.low */
39 0 /* freepages.high */
/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/*
 * This variable contains the amount of page steals the system
 * is doing, averaged over a minute. We use this to determine how
 * many inactive pages we should have.
 *
 * In reclaim_page and __alloc_pages: memory_pressure++
 * In __free_pages_ok: memory_pressure--
 * In recalculate_vm_stats the value is decayed (once a second)
 */
int memory_pressure;
56 /* We track the number of pages currently being asynchronously swapped
57 out, so that we don't try to swap TOO many pages out at once */
58 atomic_t nr_async_pages
= ATOMIC_INIT(0);
60 buffer_mem_t buffer_mem
= {
61 2, /* minimum percent buffer */
62 10, /* borrow percent buffer */
63 60 /* maximum percent buffer */
66 buffer_mem_t page_cache
= {
67 2, /* minimum percent page cache */
68 15, /* borrow percent page cache */
72 pager_daemon_t pager_daemon
= {
73 512, /* base number for calculating the number of tries */
74 SWAP_CLUSTER_MAX
, /* minimum number of tries */
75 8, /* do swap I/O in clusters of this size */
79 * age_page_{up,down} - page aging helper functions
80 * @page - the page we want to age
81 * @nolock - are we already holding the pagelist_lru_lock?
83 * If the page is on one of the lists (active, inactive_dirty or
84 * inactive_clean), we will grab the pagelist_lru_lock as needed.
85 * If you're already holding the lock, call this function with the
86 * nolock argument non-zero.
88 void age_page_up_nolock(struct page
* page
)
91 * We're dealing with an inactive page, move the page
95 activate_page_nolock(page
);
97 /* The actual page aging bit */
98 page
->age
+= PAGE_AGE_ADV
;
99 if (page
->age
> PAGE_AGE_MAX
)
100 page
->age
= PAGE_AGE_MAX
;
104 * We use this (minimal) function in the case where we
105 * know we can't deactivate the page (yet).
107 void age_page_down_ageonly(struct page
* page
)
112 void age_page_down_nolock(struct page
* page
)
114 /* The actual page aging bit */
118 * The page is now an old page. Move to the inactive
119 * list (if possible ... see below).
122 deactivate_page_nolock(page
);
125 void age_page_up(struct page
* page
)
128 * We're dealing with an inactive page, move the page
129 * to the active list.
134 /* The actual page aging bit */
135 page
->age
+= PAGE_AGE_ADV
;
136 if (page
->age
> PAGE_AGE_MAX
)
137 page
->age
= PAGE_AGE_MAX
;
140 void age_page_down(struct page
* page
)
142 /* The actual page aging bit */
146 * The page is now an old page. Move to the inactive
147 * list (if possible ... see below).
150 deactivate_page(page
);
155 * (de)activate_page - move pages from/to active and inactive lists
156 * @page: the page we want to move
157 * @nolock - are we already holding the pagemap_lru_lock?
159 * Deactivate_page will move an active page to the right
160 * inactive list, while activate_page will move a page back
161 * from one of the inactive lists to the active list. If
162 * called on a page which is not on any of the lists, the
163 * page is left alone.
165 void deactivate_page_nolock(struct page
* page
)
168 * One for the cache, one for the extra reference the
169 * caller has and (maybe) one for the buffers.
171 * This isn't perfect, but works for just about everything.
172 * Besides, as long as we don't move unfreeable pages to the
173 * inactive_clean list it doesn't need to be perfect...
175 int maxcount
= (page
->buffers
? 3 : 2);
177 ClearPageReferenced(page
);
180 * Don't touch it if it's not on the active list.
181 * (some pages aren't on any list at all)
183 if (PageActive(page
) && page_count(page
) <= maxcount
&& !page_ramdisk(page
)) {
184 del_page_from_active_list(page
);
185 add_page_to_inactive_dirty_list(page
);
189 void deactivate_page(struct page
* page
)
191 spin_lock(&pagemap_lru_lock
);
192 deactivate_page_nolock(page
);
193 spin_unlock(&pagemap_lru_lock
);
197 * Move an inactive page to the active list.
199 void activate_page_nolock(struct page
* page
)
201 if (PageInactiveDirty(page
)) {
202 del_page_from_inactive_dirty_list(page
);
203 add_page_to_active_list(page
);
204 } else if (PageInactiveClean(page
)) {
205 del_page_from_inactive_clean_list(page
);
206 add_page_to_active_list(page
);
209 * The page was not on any list, so we take care
210 * not to do anything.
214 /* Make sure the page gets a fair chance at staying active. */
215 if (page
->age
< PAGE_AGE_START
)
216 page
->age
= PAGE_AGE_START
;
219 void activate_page(struct page
* page
)
221 spin_lock(&pagemap_lru_lock
);
222 activate_page_nolock(page
);
223 spin_unlock(&pagemap_lru_lock
);
227 * lru_cache_add: add a page to the page lists
228 * @page: the page to add
230 void lru_cache_add(struct page
* page
)
232 spin_lock(&pagemap_lru_lock
);
233 if (!PageLocked(page
))
236 add_page_to_active_list(page
);
237 /* This should be relatively rare */
239 deactivate_page_nolock(page
);
240 spin_unlock(&pagemap_lru_lock
);
/**
 * __lru_cache_del: remove a page from the page lists
 * @page: the page to remove
 *
 * This function is for when the caller already holds
 * the pagemap_lru_lock.
 */
void __lru_cache_del(struct page * page)
{
	if (PageActive(page)) {
		del_page_from_active_list(page);
	} else if (PageInactiveDirty(page)) {
		del_page_from_inactive_dirty_list(page);
	} else if (PageInactiveClean(page)) {
		del_page_from_inactive_clean_list(page);
	} else {
		/* Page is on none of the known LRU lists -- complain loudly. */
		printk("VM: __lru_cache_del, found unknown page ?!\n");
	}
}
265 * lru_cache_del: remove a page from the page lists
266 * @page: the page to remove
268 void lru_cache_del(struct page
* page
)
270 if (!PageLocked(page
))
272 spin_lock(&pagemap_lru_lock
);
273 __lru_cache_del(page
);
274 spin_unlock(&pagemap_lru_lock
);
278 * recalculate_vm_stats - recalculate VM statistics
280 * This function should be called once a second to recalculate
281 * some useful statistics the VM subsystem uses to determine
284 void recalculate_vm_stats(void)
287 * Substract one second worth of memory_pressure from
290 memory_pressure
-= (memory_pressure
>> INACTIVE_SHIFT
);
294 * Perform any setup for the swap system
296 void __init
swap_setup(void)
298 /* Use a smaller cluster for memory <16MB or <32MB */
299 if (num_physpages
< ((16 * 1024 * 1024) >> PAGE_SHIFT
))
301 else if (num_physpages
< ((32 * 1024 * 1024) >> PAGE_SHIFT
))