/*
 * (C) Copyright 2007-2010 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 *
 * This file is released under the GPLv2. See the COPYING file for more
 * details.
 */
/*
 * Lists of free page ranges
 */
static struct list_head orders_normal[64-PAGE_SHIFT];
static struct list_head orders_low[31-PAGE_SHIFT];

static spinlock_t orders_lock = SPIN_LOCK_UNLOCKED;
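/*
 * Seed the free lists for one zone.  Each set bit in the page count becomes
 * one free range of the corresponding order, laid out back to back starting
 * at base, lowest order first: e.g. 13 pages (binary 1101) become one
 * order-0, one order-2, and one order-3 range.
 */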
static void __init_buddy_alloc(u64 base, int pages, int max_order,
                               struct list_head *orders)
{
        struct page *p;
        int order;

        for(order = 0; order < max_order; order++) {
                INIT_LIST_HEAD(&orders[order]);

                if (pages & (1 << order)) {
                        p = addr_to_page((void*) base);

                        list_add(&p->buddy, &orders[order]);

                        base += (PAGE_SIZE << order);
                }
        }
}
/*
 * Initialize the buddy allocator. This is rather simple, and the only
 * complication is that we must keep track of storage below 2GB separately
 * since some IO related structures can address only the first 2GB. *sigh*
 */
void init_buddy_alloc(u64 start)
{
        u64 normal_pages, low_pages;

        if (memsize > 2UL*1024*1024*1024) {
                /* There are more than 2GB of storage */
                low_pages = (2UL*1024*1024*1024 - start) >> PAGE_SHIFT;
                normal_pages = (memsize - start) >> PAGE_SHIFT;
                normal_pages -= low_pages;
        } else {
                /* All the storage is under the 2GB mark */
                low_pages = (memsize - start) >> PAGE_SHIFT;
                normal_pages = 0;
        }

        /* let's add all the free low storage to the lists */
        __init_buddy_alloc(start, low_pages, ZONE_LOW_MAX_ORDER, orders_low);

        /* now, let's add all the free storage above 2GB */
        __init_buddy_alloc(2UL*1024*1024*1024, normal_pages, ZONE_NORMAL_MAX_ORDER,
                           orders_normal);
}
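/*
 * Take the first free range of exactly this order off the given zone's
 * list, or return NULL if none is available.
 */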
static struct page *__do_alloc_pages(int order, struct list_head *orders)
{
        struct page *page = NULL;

        if (!list_empty(&orders[order])) {
                page = list_first_entry(&orders[order], struct page, buddy);

                list_del(&page->buddy);
        }

        return page;
}
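/*
 * Pick the right zone for the request: ZONE_LOW allocations are served from
 * the low lists only, anything else tries ZONE_NORMAL first and falls back
 * to ZONE_LOW.
 */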
static struct page *__alloc_pages(int order, int type)
{
        struct page *p;

        /* Do we have to use ZONE_LOW? */
        if (type == ZONE_LOW)
                return __do_alloc_pages(order, orders_low);

        p = __do_alloc_pages(order, orders_normal);
        if (p)
                return p;

        /* If there was no ZONE_NORMAL page, let's try ZONE_LOW */
        return __do_alloc_pages(order, orders_low);
}
/*
 * Split a given (actual) order allocation into an allocation of wanted
 * order, and return the rest of the pages to the unused lists
 *
 * E.g., actual = 3, wanted = 1:
 *
 * +---+---+---+---+---+---+---+---+
 * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
 * +---+---+---+---+---+---+---+---+
 *
 * return order 2 range (pages 4-7) back to free pool:
 *
 * +---+---+---+---+
 * | 0 | 1 | 2 | 3 |
 * +---+---+---+---+
 *
 * return order 1 range (pages 2-3) back to free pool:
 *
 * +---+---+
 * | 0 | 1 |
 * +---+---+
 *
 * The End. Now, pages 2-7 are back in the free pool, and pages 0&1 are an
 * order 1 allocation.
 */
static void __chip_pages(struct page *base, int actual, int wanted)
{
        struct page *p;
        struct list_head *orders;

        orders = (IS_LOW_ZONE(base) ? orders_low : orders_normal);

        for(; actual > wanted; actual--) {
                p = &base[1 << (actual-1)];
                list_add(&p->buddy, &orders[actual-1]);
        }
}
/**
 * alloc_pages - allocate a series of consecutive pages
 * @order:  allocate 2^order consecutive pages
 * @type:   type of pages (<2GB, or anywhere)
 */
struct page *alloc_pages(int order, int type)
{
        struct page *page;
        int gord, max_order;
        unsigned long mask;

        spin_lock_intsave(&orders_lock, &mask);

        page = __alloc_pages(order, type);
        if (page)
                goto out;

        /*
         * There is no page-range of the given order, let's try to find the
         * smallest one larger and split it
         */

        max_order = (type == ZONE_LOW ?
                     ZONE_LOW_MAX_ORDER : ZONE_NORMAL_MAX_ORDER);

        for(gord = order+1; gord < max_order; gord++) {
                page = __alloc_pages(gord, type);
                if (!page)
                        continue;

                __chip_pages(page, gord, order);
                goto out;
        }

        /* alright, totally out of memory */
        page = NULL;

out:
        spin_unlock_intrestore(&orders_lock, mask);

        return page;
}
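/*
 * Example use (a sketch, not part of the allocator): grab a single page
 * below 2GB for an I/O structure and later give it back.  It assumes a
 * page_to_addr() counterpart to addr_to_page() for turning the returned
 * struct page back into an address.
 *
 *      struct page *p;
 *
 *      p = alloc_pages(0, ZONE_LOW);      2^0 = 1 page, below 2GB
 *      if (!p)
 *              ... handle out-of-memory ...
 *
 *      ... use page_to_addr(p) ...
 *
 *      free_pages(page_to_addr(p), 0);    same order as the allocation
 */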
/**
 * free_pages - return 2^order consecutive pages to the allocator
 * @ptr:    address of the first page
 * @order:  order of the original allocation
 */
void free_pages(void *ptr, int order)
{
        struct page *base = addr_to_page(ptr);
        struct list_head *orders;
        unsigned long mask;

        orders = IS_LOW_ZONE(base) ? orders_low : orders_normal;

        spin_lock_intsave(&orders_lock, &mask);
        list_add(&base->buddy, &orders[order]);
        spin_unlock_intrestore(&orders_lock, mask);

        /*
         * TODO: a more complex thing we could try is to coalesce adjacent
         * page ranges into one higher order one (defrag)
         */
}
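
/*
 * A minimal sketch of the defrag idea from the TODO above: after a range is
 * freed, look for its buddy on the same free list and, if found, merge the
 * two into one range of the next higher order, repeating upward.  It leans
 * on a few assumptions this file does not establish: a page_to_addr()
 * counterpart to addr_to_page(), a Linux-style list_for_each_entry()
 * iterator, and ranges being naturally aligned to their order (which
 * __init_buddy_alloc() does not strictly guarantee).  A caller would hold
 * orders_lock and invoke this right after the list_add() in free_pages(),
 * so base is expected to already be on orders[order].
 */
static void __coalesce(struct page *base, int order, struct list_head *orders,
                       int max_order)
{
        struct page *p;
        u64 addr, baddr;

        for (; order+1 < max_order; order++) {
                addr = (u64) page_to_addr(base);

                /* the buddy of an order-N range differs only in bit PAGE_SHIFT+N */
                baddr = addr ^ (PAGE_SIZE << order);

                /* is the buddy sitting on the free list of the same order? */
                list_for_each_entry(p, &orders[order], buddy)
                        if ((u64) page_to_addr(p) == baddr)
                                break;
                if (&p->buddy == &orders[order])
                        return; /* buddy is not free, nothing to merge */

                /* unlink both halves and re-add the lower one as order+1 */
                list_del(&p->buddy);
                list_del(&base->buddy);
                if (baddr < addr)
                        base = p;
                list_add(&base->buddy, &orders[order+1]);
        }
}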