hvf.git: cp/mm/buddy.c
/*
 * (C) Copyright 2007-2010 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 *
 * This file is released under the GPLv2. See the COPYING file for more
 * details.
 */

#include <list.h>
#include <page.h>
#include <mm.h>
#include <buddy.h>
#include <spinlock.h>

/*
 * Lists of free page ranges
 */
static struct list_head orders_normal[64-PAGE_SHIFT];
static struct list_head orders_low[31-PAGE_SHIFT];

static spinlock_t orders_lock = SPIN_LOCK_UNLOCKED;
/*
 * Seed the free lists for one zone: decompose the page count into powers of
 * two and put one range of each contributing order on the matching list.
 */
static void __init_buddy_alloc(u64 base, u64 pages, int max_order,
			       struct list_head *orders)
{
	int order;
	struct page *p;

	for(order=0; order < max_order; order++) {
		INIT_LIST_HEAD(&orders[order]);

		/*
		 * Use 64-bit shifts here; max_order can exceed 31 for
		 * the normal zone.
		 */
		if (pages & (1UL << order)) {
			p = addr_to_page((void*) base);

			list_add(&p->buddy, &orders[order]);

			base += ((u64) PAGE_SIZE << order);
		}
	}
}
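/*
 * Illustration (not part of the original file; assumes PAGE_SHIFT == 12,
 * i.e. 4KB pages): seeding a zone with 13 free pages starting at 0x100000
 * decomposes 13 = 0b1101 into one order-0, one order-2, and one order-3
 * range:
 *
 *	__init_buddy_alloc(0x100000, 13, max_order, orders);
 *
 *	orders[0]: page  at 0x100000            (1 page)
 *	orders[2]: pages at 0x101000-0x104fff   (4 pages)
 *	orders[3]: pages at 0x105000-0x10cfff   (8 pages)
 */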
/*
 * Initialize the buddy allocator. This is rather simple, and the only
 * complication is that we must keep track of storage below 2GB separately
 * since some IO related structures can address only the first 2GB. *sigh*
 */
void init_buddy_alloc(u64 start)
{
	u64 normal_pages;
	u64 low_pages;

	if (memsize > 2UL*1024*1024*1024) {
		/* There is more than 2GB of storage */
		low_pages = (2UL*1024*1024*1024 - start) >> PAGE_SHIFT;
		normal_pages = (memsize - start) >> PAGE_SHIFT;
		normal_pages -= low_pages;
	} else {
		/* All the storage is under the 2GB mark */
		low_pages = (memsize - start) >> PAGE_SHIFT;
		normal_pages = 0;
	}

	/* let's add all the free low storage to the lists */
	__init_buddy_alloc(start, low_pages, ZONE_LOW_MAX_ORDER, orders_low);

	/* now, let's add all the free storage above 2GB */
	__init_buddy_alloc(2UL*1024*1024*1024, normal_pages, ZONE_NORMAL_MAX_ORDER,
			   orders_normal);
}
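/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12, i.e. 4KB pages):
 * with memsize = 3GB and start = 1MB,
 *
 *	low_pages    = (2GB - 1MB) >> 12               = 524032
 *	normal_pages = ((3GB - 1MB) >> 12) - low_pages = 262144
 *
 * so the low zone covers everything from 1MB up to the 2GB mark, and the
 * normal zone covers the remaining 1GB above it.
 */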
static struct page *__do_alloc_pages(int order, struct list_head *orders)
{
	struct page *page;

	if (!list_empty(&orders[order])) {
		page = list_first_entry(&orders[order], struct page, buddy);

		list_del(&page->buddy);

		return page;
	}

	return NULL;
}

static struct page *__alloc_pages(int order, int type)
{
	struct page *p;

	/* Do we have to use ZONE_LOW? */
	if (type == ZONE_LOW)
		goto zone_low;

	p = __do_alloc_pages(order, orders_normal);
	if (p)
		return p;

	/* If there was no ZONE_NORMAL page, let's try ZONE_LOW */

zone_low:
	return __do_alloc_pages(order, orders_low);
}
/*
 * Split a given (actual) order allocation into an allocation of the wanted
 * order, and return the rest of the pages to the free lists.
 *
 * E.g.,
 *
 * pages (base):
 *
 * +---+---+---+---+---+---+---+---+
 * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
 * +---+---+---+---+---+---+---+---+
 *
 * actual = 3
 * wanted = 1
 *
 * return order 2 range back to free pool:
 *
 * +---+---+---+---+
 * | 4 | 5 | 6 | 7 |
 * +---+---+---+---+
 *
 * return order 1 range back to free pool:
 *
 * +---+---+
 * | 2 | 3 |
 * +---+---+
 *
 * The End. Now, pages 2-7 are back in the free pool, and pages 0 & 1 are an
 * order 1 allocation.
 */
static void __chip_pages(struct page *base, int actual, int wanted)
{
	struct page *p;
	struct list_head *orders;

	orders = (IS_LOW_ZONE(base) ? orders_low : orders_normal);

	for(; actual > wanted; actual--) {
		/*
		 * Put the upper half of the current (order 'actual') range
		 * back on the order 'actual-1' free list.
		 */
		p = &base[1 << (actual-1)];
		list_add(&p->buddy, &orders[actual-1]);
	}
}
/**
 * alloc_pages - allocate a series of consecutive pages
 * @order: allocate 2^order consecutive pages
 * @type: type of pages (<2GB, or anywhere)
 */
struct page *alloc_pages(int order, int type)
{
	struct page *page;
	int gord;
	unsigned long mask;
	int max_order;

	spin_lock_intsave(&orders_lock, &mask);

	/* easy way */
	page = __alloc_pages(order, type);
	if (page)
		goto out;

	/*
	 * There is no page-range of the given order, let's try to find the
	 * smallest larger one and split it
	 */
	max_order = (type == ZONE_LOW ?
		     ZONE_LOW_MAX_ORDER : ZONE_NORMAL_MAX_ORDER);

	for(gord=order+1; gord < max_order; gord++) {
		page = __alloc_pages(gord, type);
		if (!page)
			continue;

		__chip_pages(page, gord, order);

		goto out;
	}

	/* alright, totally out of memory */
	page = NULL;

out:
	spin_unlock_intrestore(&orders_lock, mask);

	return page;
}
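/*
 * Example use (illustrative only, not part of the original file): allocate a
 * single page that must reside below 2GB, e.g. for an I/O control block.
 * page_to_addr() is assumed here to be the inverse of addr_to_page(); it is
 * not defined in this file.  The caller would later release the page with
 * free_pages(ptr, 0).
 */
static void *example_grab_low_page(void)
{
	struct page *p;

	p = alloc_pages(0, ZONE_LOW);	/* 2^0 = 1 page, below 2GB */
	if (!p)
		return NULL;		/* out of low storage */

	return page_to_addr(p);
}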
void free_pages(void *ptr, int order)
{
	struct page *base = addr_to_page(ptr);
	unsigned long mask;
	struct list_head *orders;

	orders = IS_LOW_ZONE(base) ? orders_low : orders_normal;

	spin_lock_intsave(&orders_lock, &mask);
	list_add(&base->buddy, &orders[order]);
	spin_unlock_intrestore(&orders_lock, mask);

	/*
	 * TODO: a more complex thing we could try is to coalesce adjacent
	 * page ranges into one higher order one (defrag)
	 */
}
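/*
 * A minimal sketch of the coalescing idea mentioned in the TODO above (not
 * part of the original file).  It assumes a page_to_pfn()-style helper that
 * returns a page's frame number, and a Linux-style list_for_each_entry()
 * from <list.h>; neither is guaranteed to exist in this tree.  free_pages()
 * would call this with orders_lock held instead of doing the plain
 * list_add().
 */
static void __coalesce_free(struct page *base, int order,
			    struct list_head *orders, int max_order)
{
	struct page *p, *bud;
	unsigned long pfn;
	int found;

	while (order < max_order - 1) {
		/* the buddy's frame number differs only in bit 'order' */
		pfn = page_to_pfn(base);	/* assumed helper */
		if (pfn & (1UL << order))
			bud = base - (1UL << order);
		else
			bud = base + (1UL << order);

		/* is the buddy sitting free on the same-order list? */
		found = 0;
		list_for_each_entry(p, &orders[order], buddy) {
			if (p == bud) {
				found = 1;
				break;
			}
		}
		if (!found)
			break;

		/* yes: pull it off and treat the pair as one bigger range */
		list_del(&bud->buddy);
		if (bud < base)
			base = bud;
		order++;
	}

	list_add(&base->buddy, &orders[order]);
}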