/*
 * Source: mm/dyn_pageflags.c from the linux-2.6/zen-sources.git tree
 * (blob 46c808ea2a5792148fa72205e81cb84b000a83fa, commit
 * "replace some function names").
 */
/*
 * lib/dyn_pageflags.c
 *
 * Copyright (C) 2004-2008 Nigel Cunningham <nigel at tuxonice net>
 *
 * This file is released under the GPLv2.
 *
 * Routines for dynamically allocating and releasing bitmaps
 * used as pseudo-pageflags.
 *
 * We use bitmaps, built out of order zero allocations and
 * linked together by kzalloc'd arrays of pointers into
 * an array that looks like...
 *
 *	pageflags->bitmap[node][zone_id][page_num][ul]
 *
 * All of this is transparent to the caller, who just uses
 * the allocate & free routines to create/destroy bitmaps,
 * and get/set/clear to operate on individual flags.
 *
 * Bitmaps can be sparse, with the individual pages only being
 * allocated when a bit is set in the page.
 *
 * Memory hotplugging support is work in progress. A zone's
 * start_pfn may change. If it does, we need to reallocate
 * the zone bitmap, adding additional pages to the front to
 * cover the bitmap. For simplicity, we don't shift the
 * contents of existing pages around. The lock is only used
 * to avoid reentrancy when resizing zones. The replacement
 * of old data with new is done atomically. If we try to test
 * a bit in the new area before the update is completed, we
 * know it's zero.
 *
 * TuxOnIce knows the structure of these pageflags, so that
 * it can serialise them in the image header. TODO: Make
 * that support more generic so that TuxOnIce doesn't need
 * to know how dyn_pageflags are stored.
 */
40 /* Avoid warnings in include/linux/mm.h */
41 struct page;
42 struct dyn_pageflags;
43 int test_dynpageflag(struct dyn_pageflags *bitmap, struct page *page);
45 #include <linux/bootmem.h>
46 #include <linux/dyn_pageflags.h>
47 #include <linux/module.h>
48 #include <linux/suspend.h>
/*
 * All non-sparse bitmaps are linked on this list so that memory-hotplug
 * events can resize each of them (see dyn_pageflags_hotplug below).
 */
static LIST_HEAD(flags_list);
static DEFINE_SPINLOCK(flags_list_lock);

/* Current allocator: alloc_bootmem_low during early boot, kzalloc after. */
static void *(*dyn_allocator)(unsigned long size, unsigned long flags);

/* Set by the "dyn_pageflags_debug" kernel command line parameter. */
static int dyn_pageflags_debug;

#define PR_DEBUG(a, b...) \
	do { if (dyn_pageflags_debug) printk(a, ##b); } while (0)

#define DUMP_DEBUG(bitmap) \
	do { if (dyn_pageflags_debug) dump_pagemap(bitmap); } while (0)
62 #if BITS_PER_LONG == 32
63 #define UL_SHIFT 5
64 #else
65 #if BITS_PER_LONG == 64
66 #define UL_SHIFT 6
67 #else
68 #error Bits per long not 32 or 64?
69 #endif
70 #endif
72 #ifndef PHYS_PFN_OFFSET
73 #define PHYS_PFN_OFFSET 0
74 #endif
76 #define BIT_NUM_MASK ((sizeof(unsigned long) << 3) - 1)
77 #define PAGE_NUM_MASK (~((1 << (PAGE_SHIFT + 3)) - 1))
78 #define UL_NUM_MASK (~(BIT_NUM_MASK | PAGE_NUM_MASK))
/* First pfn of a zone, normalised by the platform's physical pfn offset. */
#define ZONE_START(thiszone) ((thiszone)->zone_start_pfn - PHYS_PFN_OFFSET)

/*
 * PAGENUMBER gives the index of the page within the zone.
 * PAGEINDEX gives the index of the unsigned long within that page.
 * PAGEBIT gives the index of the bit within the unsigned long.
 */
#define PAGENUMBER(zone_offset) ((int) (zone_offset >> (PAGE_SHIFT + 3)))
#define PAGEINDEX(zone_offset) ((int) ((zone_offset & UL_NUM_MASK) >> UL_SHIFT))
#define PAGEBIT(zone_offset) ((int) (zone_offset & BIT_NUM_MASK))

/* Address of the unsigned long holding the bit for zone_pfn. */
#define PAGE_UL_PTR(bitmap, node, zone_num, zone_pfn) \
	((bitmap[node][zone_num][PAGENUMBER(zone_pfn)]) + PAGEINDEX(zone_pfn))

/* Order-zero pages needed to hold one bit per page of the zone/span. */
#define pages_for_zone(zone) \
	(DIV_ROUND_UP((zone)->spanned_pages, (PAGE_SIZE << 3)))

#define pages_for_span(span) \
	(DIV_ROUND_UP(span, PAGE_SIZE << 3))
/* __maybe_unused for testing functions below */
/*
 * GET_BIT_AND_UL - Declare and initialise the locals needed to locate a
 * page's flag within a bitmap: the containing unsigned long (ul, NULL
 * when that part of a sparse/stale bitmap isn't allocated) and the bit
 * index within it.
 *
 * Entries [0] and [1] of a zone array hold the recorded zone start pfn
 * and the number of data pages; data pages occupy indices 2..count+1,
 * hence pagenum is offset by 2.
 *
 * Fix: the bounds guard used ">= (pagenum - 2)". With a stale bitmap
 * (zone grown since allocation), PAGENUMBER(zone_pfn) can equal the
 * recorded page count, and ">=" then permits reading zone_array[count+2]
 * - one past the count+2-entry pointer array. The guard must require the
 * count to be strictly greater than the page index.
 */
#define GET_BIT_AND_UL(pageflags, page) \
	struct zone *zone = page_zone(page); \
	unsigned long pfn = page_to_pfn(page); \
	unsigned long zone_pfn = pfn - ZONE_START(zone); \
	int node = page_to_nid(page); \
	int zone_num = zone_idx(zone); \
	int pagenum = PAGENUMBER(zone_pfn) + 2; \
	int page_offset = PAGEINDEX(zone_pfn); \
	unsigned long **zone_array = ((pageflags)->bitmap && \
		(pageflags)->bitmap[node] && \
		(pageflags)->bitmap[node][zone_num]) ? \
		(pageflags)->bitmap[node][zone_num] : NULL; \
	unsigned long __maybe_unused *ul = (zone_array && \
		(unsigned long) zone_array[0] <= pfn && \
		(unsigned long) zone_array[1] > (pagenum - 2) && \
		zone_array[pagenum]) ? zone_array[pagenum] + page_offset : \
		NULL; \
	int bit __maybe_unused = PAGEBIT(zone_pfn);
/* Iterate every (node, zone index) pair over all online nodes. */
#define for_each_online_pgdat_zone(pgdat, zone_nr) \
	for_each_online_pgdat(pgdat) \
		for (zone_nr = 0; zone_nr < MAX_NR_ZONES; zone_nr++)
125 * dump_pagemap - Display the contents of a bitmap for debugging purposes.
127 * @pagemap: The array to be dumped.
129 void dump_pagemap(struct dyn_pageflags *pagemap)
131 int i = 0;
132 struct pglist_data *pgdat;
133 unsigned long ****bitmap = pagemap->bitmap;
135 printk(" --- Dump bitmap %p ---\n", pagemap);
137 printk(KERN_INFO "%p: Sparse flag = %d\n",
138 &pagemap->sparse, pagemap->sparse);
139 printk(KERN_INFO "%p: Bitmap = %p\n",
140 &pagemap->bitmap, bitmap);
142 if (!bitmap)
143 goto out;
145 for_each_online_pgdat(pgdat) {
146 int node_id = pgdat->node_id, zone_nr;
147 printk(KERN_INFO "%p: Node %d => %p\n",
148 &bitmap[node_id], node_id,
149 bitmap[node_id]);
150 if (!bitmap[node_id])
151 continue;
152 for (zone_nr = 0; zone_nr < MAX_NR_ZONES; zone_nr++) {
153 printk(KERN_INFO "%p: Zone %d => %p%s\n",
154 &bitmap[node_id][zone_nr], zone_nr,
155 bitmap[node_id][zone_nr],
156 bitmap[node_id][zone_nr] ? "" :
157 " (empty)");
158 if (!bitmap[node_id][zone_nr])
159 continue;
161 printk(KERN_INFO "%p: Zone start pfn = %p\n",
162 &bitmap[node_id][zone_nr][0],
163 bitmap[node_id][zone_nr][0]);
164 printk(KERN_INFO "%p: Number of pages = %p\n",
165 &bitmap[node_id][zone_nr][1],
166 bitmap[node_id][zone_nr][1]);
167 for (i = 2; i < (unsigned long) bitmap[node_id]
168 [zone_nr][1] + 2; i++)
169 printk(KERN_INFO
170 "%p: Page %2d = %p\n",
171 &bitmap[node_id][zone_nr][i],
172 i - 2,
173 bitmap[node_id][zone_nr][i]);
176 out:
177 printk(KERN_INFO " --- Dump of bitmap %p finishes\n", pagemap);
179 EXPORT_IF_TOI_MODULAR(dump_pagemap);
182 * clear_dyn_pageflags - Zero all pageflags in a bitmap.
184 * @pagemap: The array to be cleared.
186 * Clear an array used to store dynamically allocated pageflags.
188 void clear_dyn_pageflags(struct dyn_pageflags *pagemap)
190 int i = 0, zone_idx;
191 struct pglist_data *pgdat;
192 unsigned long ****bitmap = pagemap->bitmap;
194 for_each_online_pgdat_zone(pgdat, zone_idx) {
195 int node_id = pgdat->node_id;
196 struct zone *zone = &pgdat->node_zones[zone_idx];
198 if (!populated_zone(zone) ||
199 (!bitmap[node_id] || !bitmap[node_id][zone_idx]))
200 continue;
202 for (i = 2; i < pages_for_zone(zone) + 2; i++)
203 if (bitmap[node_id][zone_idx][i])
204 memset((bitmap[node_id][zone_idx][i]), 0,
205 PAGE_SIZE);
208 EXPORT_IF_TOI_MODULAR(clear_dyn_pageflags);
211 * Allocators.
213 * During boot time, we want to use alloc_bootmem_low. Afterwards, we want
214 * kzalloc. These routines let us do that without causing compile time warnings
215 * about mismatched sections, as would happen if we did a simple
216 * boot ? alloc_bootmem_low() : kzalloc() below.
220 * boot_time_allocator - Allocator used while booting.
222 * @size: Number of bytes wanted.
223 * @flags: Allocation flags (ignored here).
225 static __init void *boot_time_allocator(unsigned long size, unsigned long flags)
227 return alloc_bootmem_low(size);
231 * normal_allocator - Allocator used post-boot.
233 * @size: Number of bytes wanted.
234 * @flags: Allocation flags.
236 * Allocate memory for our page flags.
238 static void *normal_allocator(unsigned long size, unsigned long flags)
240 if (size == PAGE_SIZE)
241 return (void *) get_zeroed_page(flags);
242 else
243 return kzalloc(size, flags);
247 * dyn_pageflags_init - Do the earliest initialisation.
249 * Very early in the boot process, set our allocator (alloc_bootmem_low) and
250 * allocate bitmaps for slab and buddy pageflags.
252 void __init dyn_pageflags_init(void)
254 dyn_allocator = boot_time_allocator;
258 * dyn_pageflags_use_kzalloc - Reset the allocator for normal use.
260 * Reset the allocator to our normal, post boot function.
262 void __init dyn_pageflags_use_kzalloc(void)
264 dyn_allocator = (void *) normal_allocator;
268 * try_alloc_dyn_pageflag_part - Try to allocate a pointer array.
270 * Try to allocate a contiguous array of pointers.
272 static int try_alloc_dyn_pageflag_part(int nr_ptrs, void **ptr)
274 *ptr = (*dyn_allocator)(sizeof(void *) * nr_ptrs, GFP_ATOMIC);
276 if (*ptr)
277 return 0;
279 printk(KERN_INFO
280 "Error. Unable to allocate memory for dynamic pageflags.");
281 return -ENOMEM;
284 static int populate_bitmap_page(struct dyn_pageflags *pageflags, int take_lock,
285 unsigned long **page_ptr)
287 void *address;
288 unsigned long flags = 0;
290 if (take_lock)
291 spin_lock_irqsave(&pageflags->struct_lock, flags);
294 * The page may have been allocated while we waited.
296 if (*page_ptr)
297 goto out;
299 address = (*dyn_allocator)(PAGE_SIZE, GFP_ATOMIC);
301 if (!address) {
302 PR_DEBUG("Error. Unable to allocate memory for "
303 "dynamic pageflags page.");
304 if (pageflags)
305 spin_unlock_irqrestore(&pageflags->struct_lock, flags);
306 return -ENOMEM;
309 *page_ptr = address;
310 out:
311 if (take_lock)
312 spin_unlock_irqrestore(&pageflags->struct_lock, flags);
313 return 0;
317 * resize_zone_bitmap - Resize the array of pages for a bitmap.
319 * Shrink or extend a list of pages for a zone in a bitmap, preserving
320 * existing data.
322 static int resize_zone_bitmap(struct dyn_pageflags *pagemap, struct zone *zone,
323 unsigned long old_pages, unsigned long new_pages,
324 unsigned long copy_offset, int take_lock)
326 unsigned long **new_ptr = NULL, ****bitmap = pagemap->bitmap;
327 int node_id = zone_to_nid(zone), zone_idx = zone_idx(zone),
328 to_copy = min(old_pages, new_pages), result = 0;
329 unsigned long **old_ptr = bitmap[node_id][zone_idx], i;
331 if (new_pages) {
332 if (try_alloc_dyn_pageflag_part(new_pages + 2,
333 (void **) &new_ptr))
334 return -ENOMEM;
336 if (old_pages)
337 memcpy(new_ptr + 2 + copy_offset, old_ptr + 2,
338 sizeof(unsigned long) * to_copy);
340 new_ptr[0] = (void *) ZONE_START(zone);
341 new_ptr[1] = (void *) new_pages;
344 /* Free/alloc bitmap pages. */
345 if (old_pages > new_pages) {
346 for (i = new_pages + 2; i < old_pages + 2; i++)
347 if (old_ptr[i])
348 free_page((unsigned long) old_ptr[i]);
349 } else if (!pagemap->sparse) {
350 for (i = old_pages + 2; i < new_pages + 2; i++)
351 if (populate_bitmap_page(NULL, take_lock,
352 (unsigned long **) &new_ptr[i])) {
353 result = -ENOMEM;
354 break;
358 bitmap[node_id][zone_idx] = new_ptr;
359 kfree(old_ptr);
360 return result;
364 * check_dyn_pageflag_range - Resize a section of a dyn_pageflag array.
366 * @pagemap: The array to be worked on.
367 * @zone: The zone to get in sync with reality.
369 * Check the pagemap has correct allocations for the zone. This can be
370 * invoked when allocating a new bitmap, or for hot[un]plug, and so
371 * must deal with any disparities between zone_start_pfn/spanned_pages
372 * and what we have allocated. In addition, we must deal with the possibility
373 * of zone_start_pfn having changed.
375 int check_dyn_pageflag_zone(struct dyn_pageflags *pagemap, struct zone *zone,
376 int force_free_all, int take_lock)
378 int node_id = zone_to_nid(zone), zone_idx = zone_idx(zone);
379 unsigned long copy_offset = 0, old_pages, new_pages;
380 unsigned long **old_ptr = pagemap->bitmap[node_id][zone_idx];
382 old_pages = old_ptr ? (unsigned long) old_ptr[1] : 0;
383 new_pages = force_free_all ? 0 : pages_for_span(zone->spanned_pages);
385 if (old_pages == new_pages &&
386 (!old_pages || (unsigned long) old_ptr[0] == ZONE_START(zone)))
387 return 0;
389 if (old_pages &&
390 (unsigned long) old_ptr[0] != ZONE_START(zone))
391 copy_offset = pages_for_span((unsigned long) old_ptr[0] -
392 ZONE_START(zone));
394 /* New/expanded zone? */
395 return resize_zone_bitmap(pagemap, zone, old_pages, new_pages,
396 copy_offset, take_lock);
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
/**
 * dyn_pageflags_hotplug - Add pages to bitmaps for hotplugged memory.
 *
 * @zone: The zone that gained memory.
 *
 * Seek to expand bitmaps for hotplugged memory. We ignore any failure.
 * Since we handle sparse bitmaps anyway, they'll be automatically
 * populated as needed.
 */
void dyn_pageflags_hotplug(struct zone *zone)
{
	struct dyn_pageflags *this;

	list_for_each_entry(this, &flags_list, list)
		check_dyn_pageflag_zone(this, zone, 0, 1);
}
#endif
417 * free_dyn_pageflags - Free an array of dynamically allocated pageflags.
419 * @pagemap: The array to be freed.
421 * Free a dynamically allocated pageflags bitmap.
423 void free_dyn_pageflags(struct dyn_pageflags *pagemap)
425 int zone_idx;
426 struct pglist_data *pgdat;
427 unsigned long flags;
429 DUMP_DEBUG(pagemap);
431 if (!pagemap->bitmap)
432 return;
434 for_each_online_pgdat_zone(pgdat, zone_idx)
435 check_dyn_pageflag_zone(pagemap,
436 &pgdat->node_zones[zone_idx], 1, 1);
438 for_each_online_pgdat(pgdat) {
439 int i = pgdat->node_id;
441 if (pagemap->bitmap[i])
442 kfree((pagemap->bitmap)[i]);
445 kfree(pagemap->bitmap);
446 pagemap->bitmap = NULL;
448 pagemap->initialised = 0;
450 if (!pagemap->sparse) {
451 spin_lock_irqsave(&flags_list_lock, flags);
452 list_del_init(&pagemap->list);
453 pagemap->sparse = 1;
454 spin_unlock_irqrestore(&flags_list_lock, flags);
457 EXPORT_IF_TOI_MODULAR(free_dyn_pageflags);
460 * allocate_dyn_pageflags - Allocate a bitmap.
462 * @pagemap: The bitmap we want to allocate.
463 * @sparse: Whether to make the array sparse.
465 * The array we're preparing. If sparse, we don't allocate the actual
466 * pages until they're needed. If not sparse, we add the bitmap to the
467 * list so that if we're supporting memory hotplugging, we can allocate
468 * new pages on hotplug events.
470 * This routine may be called directly, or indirectly when the first bit
471 * needs to be set on a previously unused bitmap.
473 int allocate_dyn_pageflags(struct dyn_pageflags *pagemap, int sparse)
475 int zone_idx, result = -ENOMEM;
476 struct zone *zone;
477 struct pglist_data *pgdat;
478 unsigned long flags;
480 if (!sparse && (pagemap->sparse || !pagemap->initialised)) {
481 spin_lock_irqsave(&flags_list_lock, flags);
482 list_add(&pagemap->list, &flags_list);
483 spin_unlock_irqrestore(&flags_list_lock, flags);
486 spin_lock_irqsave(&pagemap->struct_lock, flags);
488 pagemap->initialised = 1;
489 pagemap->sparse = sparse;
491 if (!pagemap->bitmap && try_alloc_dyn_pageflag_part((1 << NODES_WIDTH),
492 (void **) &pagemap->bitmap))
493 goto out;
495 for_each_online_pgdat(pgdat) {
496 int node_id = pgdat->node_id;
498 if (!pagemap->bitmap[node_id] &&
499 try_alloc_dyn_pageflag_part(MAX_NR_ZONES,
500 (void **) &(pagemap->bitmap)[node_id]))
501 goto out;
503 for (zone_idx = 0; zone_idx < MAX_NR_ZONES; zone_idx++) {
504 zone = &pgdat->node_zones[zone_idx];
506 if (populated_zone(zone) &&
507 check_dyn_pageflag_zone(pagemap, zone, 0, 0))
508 goto out;
512 result = 0;
514 out:
515 spin_unlock_irqrestore(&pagemap->struct_lock, flags);
516 return result;
518 EXPORT_IF_TOI_MODULAR(allocate_dyn_pageflags);
521 * test_dynpageflag - Test a page in a bitmap.
523 * @bitmap: The bitmap we're checking.
524 * @page: The page for which we want to test the matching bit.
526 * Test whether the bit is on in the array. The array may be sparse,
527 * in which case the result is zero.
529 int test_dynpageflag(struct dyn_pageflags *bitmap, struct page *page)
531 GET_BIT_AND_UL(bitmap, page);
532 return ul ? test_bit(bit, ul) : 0;
534 EXPORT_IF_TOI_MODULAR(test_dynpageflag);
537 * set_dynpageflag - Set a bit in a bitmap.
539 * @bitmap: The bitmap we're operating on.
540 * @page: The page for which we want to set the matching bit.
542 * Set the associated bit in the array. If the array is sparse, we
543 * seek to allocate the missing page.
545 void set_dynpageflag(struct dyn_pageflags *pageflags, struct page *page)
547 GET_BIT_AND_UL(pageflags, page);
549 if (!ul) {
551 * Sparse, hotplugged or unprepared.
552 * Allocate / fill gaps in high levels
554 if (allocate_dyn_pageflags(pageflags, 1) ||
555 populate_bitmap_page(pageflags, 1, (unsigned long **)
556 &pageflags->bitmap[node][zone_num][pagenum])) {
557 printk(KERN_EMERG "Failed to allocate storage in a "
558 "sparse bitmap.\n");
559 dump_pagemap(pageflags);
560 BUG();
562 set_dynpageflag(pageflags, page);
563 } else
564 set_bit(bit, ul);
566 EXPORT_IF_TOI_MODULAR(set_dynpageflag);
569 * clear_dynpageflag - Clear a bit in a bitmap.
571 * @bitmap: The bitmap we're operating on.
572 * @page: The page for which we want to clear the matching bit.
574 * Clear the associated bit in the array. It is not an error to be asked
575 * to clear a bit on a page we haven't allocated.
577 void clear_dynpageflag(struct dyn_pageflags *bitmap, struct page *page)
579 GET_BIT_AND_UL(bitmap, page);
580 if (ul)
581 clear_bit(bit, ul);
583 EXPORT_IF_TOI_MODULAR(clear_dynpageflag);
586 * get_next_bit_on - Get the next bit in a bitmap.
588 * @pageflags: The bitmap we're searching.
589 * @counter: The previous pfn. We always return a value > this.
591 * Given a pfn (possibly max_pfn+1), find the next pfn in the bitmap that
592 * is set. If there are no more flags set, return max_pfn+1.
594 unsigned long get_next_bit_on(struct dyn_pageflags *pageflags,
595 unsigned long counter)
597 struct page *page;
598 struct zone *zone;
599 unsigned long *ul = NULL;
600 unsigned long zone_offset;
601 int pagebit, zone_num, first = (counter == (max_pfn + 1)), node;
603 if (first)
604 counter = first_online_pgdat()->node_zones->zone_start_pfn;
606 page = pfn_to_page(counter);
607 zone = page_zone(page);
608 node = zone->zone_pgdat->node_id;
609 zone_num = zone_idx(zone);
610 zone_offset = counter - ZONE_START(zone);
612 if (first)
613 goto test;
615 do {
616 zone_offset++;
618 if (zone_offset >= zone->spanned_pages) {
619 do {
620 zone = next_zone(zone);
621 if (!zone)
622 return max_pfn + 1;
623 } while (!zone->spanned_pages);
625 zone_num = zone_idx(zone);
626 node = zone->zone_pgdat->node_id;
627 zone_offset = 0;
629 test:
630 pagebit = PAGEBIT(zone_offset);
632 if (!pagebit || !ul) {
633 ul = pageflags->bitmap[node][zone_num]
634 [PAGENUMBER(zone_offset)+2];
635 if (ul)
636 ul += PAGEINDEX(zone_offset);
637 else {
638 PR_DEBUG("Unallocated page. Skipping from zone"
639 " offset %lu to the start of the next "
640 "one.\n", zone_offset);
641 zone_offset = roundup(zone_offset + 1,
642 PAGE_SIZE << 3) - 1;
643 PR_DEBUG("New zone offset is %lu.\n",
644 zone_offset);
645 continue;
649 if (!ul || !(*ul & ~((1 << pagebit) - 1))) {
650 zone_offset += BITS_PER_LONG - pagebit - 1;
651 continue;
654 } while (!ul || !test_bit(pagebit, ul));
656 return ZONE_START(zone) + zone_offset;
658 EXPORT_IF_TOI_MODULAR(get_next_bit_on);
#ifdef SELF_TEST
#include <linux/jiffies.h>

/**
 * dyn_pageflags_test - Built-in self test and micro-benchmark.
 *
 * Exercises set/test on page 1, simulates the two hotplug resize paths,
 * then times whole-range and single-bit set/clear against real pageflag
 * operations.
 *
 * Fix: both resize_zone_bitmap() invocations passed only five arguments
 * to the six-parameter function - the take_lock argument was missing
 * (0 here: no concurrent users during the test).
 */
static __init int dyn_pageflags_test(void)
{
	struct dyn_pageflags test_map;
	struct page *test_page1 = pfn_to_page(1);
	unsigned long pfn = 0, start, end;
	int i, iterations;

	memset(&test_map, 0, sizeof(test_map));

	printk("Dynpageflags testing...\n");

	printk(KERN_INFO "Set page 1...");
	set_dynpageflag(&test_map, test_page1);
	if (test_dynpageflag(&test_map, test_page1))
		printk(KERN_INFO "Ok.\n");
	else
		printk(KERN_INFO "FAILED.\n");

	printk(KERN_INFO "Test memory hotplugging #1 ...");
	{
		unsigned long orig_size;
		GET_BIT_AND_UL(&test_map, test_page1);
		orig_size = (unsigned long) test_map.bitmap[node][zone_num][1];
		/*
		 * Use the code triggered when zone_start_pfn lowers,
		 * checking that our bit is then set in the third page.
		 */
		resize_zone_bitmap(&test_map, zone, orig_size,
				orig_size + 2, 2, 0);
		DUMP_DEBUG(&test_map);
		if ((unsigned long) test_map.bitmap[node][zone_num]
					[pagenum + 2] &&
		    (unsigned long) test_map.bitmap[node][zone_num]
					[pagenum + 2][0] == 2UL)
			printk(KERN_INFO "Ok.\n");
		else
			printk(KERN_INFO "FAILED.\n");
	}

	printk(KERN_INFO "Test memory hotplugging #2 ...");
	{
		/*
		 * Test expanding bitmap length.
		 */
		unsigned long orig_size;
		GET_BIT_AND_UL(&test_map, test_page1);
		orig_size = (unsigned long) test_map.bitmap[node]
			[zone_num][1];
		resize_zone_bitmap(&test_map, zone, orig_size,
				orig_size + 2, 0, 0);
		DUMP_DEBUG(&test_map);
		pagenum += 2; /* Offset for first test */
		if (test_map.bitmap[node][zone_num][pagenum] &&
		    test_map.bitmap[node][zone_num][pagenum][0] == 2UL &&
		    (unsigned long) test_map.bitmap[node][zone_num][1] ==
				orig_size + 2)
			printk(KERN_INFO "Ok.\n");
		else
			printk(KERN_INFO "FAILED ([%d][%d][%d]: %p && %lu == "
				"2UL && %p == %lu).\n",
				node, zone_num, pagenum,
				test_map.bitmap[node][zone_num][pagenum],
				test_map.bitmap[node][zone_num][pagenum] ?
				test_map.bitmap[node][zone_num][pagenum][0] : 0,
				test_map.bitmap[node][zone_num][1],
				orig_size + 2);
	}

	free_dyn_pageflags(&test_map);

	allocate_dyn_pageflags(&test_map, 0);

	start = jiffies;

	iterations = 25000000 / max_pfn;

	for (i = 0; i < iterations; i++) {
		for (pfn = 0; pfn < max_pfn; pfn++)
			set_dynpageflag(&test_map, pfn_to_page(pfn));
		for (pfn = 0; pfn < max_pfn; pfn++)
			clear_dynpageflag(&test_map, pfn_to_page(pfn));
	}

	end = jiffies;

	free_dyn_pageflags(&test_map);

	printk(KERN_INFO "Dyn: %d iterations of setting & clearing all %lu "
			"flags took %lu jiffies.\n",
			iterations, max_pfn, end - start);

	start = jiffies;

	for (i = 0; i < iterations; i++) {
		for (pfn = 0; pfn < max_pfn; pfn++)
			set_bit(7, &(pfn_to_page(pfn))->flags);
		for (pfn = 0; pfn < max_pfn; pfn++)
			clear_bit(7, &(pfn_to_page(pfn))->flags);
	}

	end = jiffies;

	printk(KERN_INFO "Real flags: %d iterations of setting & clearing "
			"all %lu flags took %lu jiffies.\n",
			iterations, max_pfn, end - start);

	iterations = 25000000;

	start = jiffies;

	for (i = 0; i < iterations; i++) {
		set_dynpageflag(&test_map, pfn_to_page(1));
		clear_dynpageflag(&test_map, pfn_to_page(1));
	}

	end = jiffies;

	printk(KERN_INFO "Dyn: %d iterations of setting & clearing all one "
			"flag took %lu jiffies.\n", iterations, end - start);

	start = jiffies;

	for (i = 0; i < iterations; i++) {
		set_bit(7, &(pfn_to_page(1))->flags);
		clear_bit(7, &(pfn_to_page(1))->flags);
	}

	end = jiffies;

	printk(KERN_INFO "Real pageflag: %d iterations of setting & clearing "
			"all one flag took %lu jiffies.\n",
			iterations, end - start);
	return 0;
}

late_initcall(dyn_pageflags_test);
#endif
801 static int __init dyn_pageflags_debug_setup(char *str)
803 printk(KERN_INFO "Dynamic pageflags debugging enabled.\n");
804 dyn_pageflags_debug = 1;
805 return 1;
808 __setup("dyn_pageflags_debug", dyn_pageflags_debug_setup);