/*
 * Quicklist support.
 *
 * Quicklists are light weight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 *	Christoph Lameter <clameter@sgi.com>
 *		Generalized, added support for multiple lists and
 *		constructors / destructors.
 */
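
/*
 * Illustrative sketch: an architecture that keeps its page table pages on
 * quicklist 0 could wire up its pgd allocation roughly as below, so that
 * freed pgds stay zeroed on the per-CPU list and can be reused without
 * being cleared again. The list index 0 and the NULL ctor/dtor are
 * assumptions made only for this example.
 *
 *	pgd_t *pgd_alloc(struct mm_struct *mm)
 *	{
 *		return quicklist_alloc(0, GFP_KERNEL, NULL);
 *	}
 *
 *	void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 *	{
 *		quicklist_free(0, NULL, pgd);
 *	}
 */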
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/quicklist.h>

DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];

#define FRACTION_OF_NODE_MEM	16
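
/*
 * Sizing note for max_pages() below: it caps how many pages a single CPU
 * may keep cached by taking the free pages of the local node, keeping
 * 1/FRACTION_OF_NODE_MEM of them for quicklists, and dividing that share
 * among the CPUs of the node. As a rough worked example with assumed
 * numbers: 1048576 free pages (4 GiB of 4 KiB pages) on a node with
 * 8 CPUs gives 1048576 / 16 / 8 = 8192 pages, i.e. 32 MiB per CPU, and
 * the result is never allowed to drop below the caller's min_pages.
 */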
static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	int node = numa_node_id();
	struct zone *zones = NODE_DATA(node)->node_zones;
	int num_cpus_on_node;
	const struct cpumask *cpumask_on_node = cpumask_of_node(node);

	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;

	num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
	max /= num_cpus_on_node;

	return max(max, min_pages);
}

static long min_pages_to_free(struct quicklist *q,
	unsigned long min_pages, long max_free)
{
	long pages_to_free;

	pages_to_free = q->nr_pages - max_pages(min_pages);

	return min(pages_to_free, max_free);
}

/*
 * Trim down the number of pages in the quicklist
 */
void quicklist_trim(int nr, void (*dtor)(void *),
	unsigned long min_pages, unsigned long max_free)
{
	long pages_to_free;
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	if (q->nr_pages > min_pages) {
		pages_to_free = min_pages_to_free(q, min_pages, max_free);

		while (pages_to_free > 0) {
			/*
			 * We pass a gfp_t of 0 to quicklist_alloc here
			 * because we will never call into the page allocator.
			 */
			void *p = quicklist_alloc(nr, 0, NULL);

			if (dtor)
				dtor(p);
			free_page((unsigned long)p);
			pages_to_free--;
		}
	}
	put_cpu_var(quicklist);
}
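
/*
 * Illustrative sketch: architectures that use quicklists typically call
 * quicklist_trim() from their check_pgt_cache() hook, roughly like the
 * following (the values shown are an example, close to what ia64 uses):
 *
 *	void check_pgt_cache(void)
 *	{
 *		quicklist_trim(0, NULL, 25, 16);
 *	}
 *
 * i.e. trim list 0 back toward its computed cap, never below 25 pages,
 * freeing at most 16 pages per invocation.
 */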

unsigned long quicklist_total_size(void)
{
	unsigned long count = 0;
	int cpu;
	struct quicklist *ql, *q;

	for_each_online_cpu(cpu) {
		ql = per_cpu(quicklist, cpu);
		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
			count += q->nr_pages;
	}
	return count;
}
EXPORT_SYMBOL(quicklist_total_size);
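
/*
 * Usage note: the sum computed above feeds the "Quicklists:" line of
 * /proc/meminfo when CONFIG_QUICKLIST is enabled; the consumer in
 * fs/proc/meminfo.c prints it roughly as
 *
 *	"Quicklists:     %8lu kB\n", K(quicklist_total_size())
 *
 * where K() converts a page count to kilobytes (a sketch; the exact
 * format string in meminfo.c may differ).
 */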