[PARISC] Show more memory information and memory layout at bootup
arch/parisc/mm/init.c
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006 Helge Deller (deller@gmx.de)
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
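
/* Per-CPU gather state used by the generic TLB shootdown code
 * (asm-generic/tlb.h); the core mm expects the arch to provide it. */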
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern int data_start;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};
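
/* One "System RAM" resource per firmware-reported physical memory
 * range; filled in and registered with the resource tree below in
 * setup_bootmem(). */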
static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;
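
/* On 32-bit kernels, cap usable physical memory at 3.5 GB so that
 * enough kernel address space is preserved for other purposes (see
 * the longer comment in setup_bootmem()); 64-bit kernels are
 * effectively unlimited. */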
#ifdef __LP64__
#define MAX_MEM		(~0UL)
#else /* !__LP64__ */
#define MAX_MEM		(3584U*1024U*1024U)
#endif /* !__LP64__ */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;
	extern char saved_command_line[];

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
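
/* Ranges separated by more than MAX_GAP pages (1 GB worth of pages,
 * i.e. 0x40000000 bytes >> PAGE_SHIFT) are thrown away by
 * setup_bootmem() on !CONFIG_DISCONTIGMEM kernels rather than wasting
 * a huge flat mem_map on the gap. */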
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						     - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;

		for (i = 1; i < npmem_ranges; i++) {
			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bmem_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++)
		node_set_online(i);
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do is to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages));
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT));
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768
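
	/* Reserve everything from physical address 0 up through the PDC
	 * "page zero" free-memory mark plus the console IODC area, then
	 * the kernel image itself (_text.._end), then the bootmap pages,
	 * so the bootmem allocator never hands any of these out. */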
	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
			(unsigned long)(_end - _text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
				__pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
	unsigned long addr, init_begin, init_end;

	printk(KERN_INFO "Freeing unused kernel memory: ");

#ifdef CONFIG_DEBUG_KERNEL
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	local_irq_disable();

	memset(__init_begin, 0x00,
		(unsigned long)__init_end - (unsigned long)__init_begin);

	flush_data_cache();
	asm volatile("sync" : : );
	flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
	asm volatile("sync" : : );

	local_irq_enable();
#endif

	/* align __init_begin and __init_end to page size,
	   ignoring linker script where we might have tried to save RAM */
	init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
	init_end = PAGE_ALIGN((unsigned long)(__init_end));
	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new led state on systems shipped with an LED state panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n", (init_end - init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)
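
/* SET_MAP_OFFSET() rounds an address up to the next VM_MAP_OFFSET
 * (32 kB) boundary; this is what actually creates the "hole" described
 * above between consecutive mapping areas. */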
#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

void *vmalloc_start __read_mostly;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (PageReserved(pfn_to_page(tmp)))
			reservedpages++;

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    lowmem  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       &__init_begin, &__init_end,
	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

	       &_etext, &_edata,
	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

	       &_text, &_etext,
	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
#endif
}
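
/* The kernel's shared all-zeroes page; allocated and cleared in
 * pagetable_init() below.  On parisc this is what ZERO_PAGE()
 * ultimately points at. */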
unsigned long *empty_zero_page __read_mostly;

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:     %6ldkB\n",
		nr_swap_pages << (PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;
			unsigned long flags;

			pgdat_resize_lock(NODE_DATA(i), &flags);
			p = nid_page_nr(i, j) - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
			pgdat_resize_unlock(NODE_DATA(i), &flags);
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j, k;

		for (i = 0; i < npmem_ranges; i++) {
			for (j = 0; j < MAX_NR_ZONES; j++) {
				zl = NODE_DATA(i)->node_zonelists + j;

				printk("Zone list for zone %d on node %d: ", j, i);
				for (k = 0; zl->zones[k] != NULL; k++)
					printk("[%d/%s] ", zone_to_nid(zl->zones[k]),
						zl->zones[k]->name);
				printk("\n");
			}
		}
	}
#endif
}
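
/*
 * map_pages() fills in kernel page tables so that the virtual range
 * beginning at start_vaddr maps the physical range [start_paddr,
 * start_paddr + size) with protection pgprot.  On 4 kB page kernels,
 * kernel text is mapped read-only, except for the fault vector page
 * (the HPMC checksum must remain writable) and the gateway page.
 */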
static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
			     unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)_text);
	ro_end = __pa((unsigned long)&data_start);
	fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
						&& address != fv_addr
						&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif
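
/*
 * paging_init() drives the whole memory setup: size and reserve
 * bootmem, build the kernel page tables and the gateway page, flush
 * caches and TLB to a known state, and finally initialize one
 * ZONE_DMA zone per physical memory range.
 */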
void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		/* We have an IOMMU, so all memory can go into a single
		   ZONE_DMA zone. */
		zones_size[ZONE_DMA] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;

			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, NODE_DATA(i), zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);
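
/*
 * Space ID (SID) allocation.  space_id[] is a bitmap of SIDs in use.
 * Freed SIDs are parked in dirty_space_id[] rather than returned to
 * the free pool immediately, because a freed SID may still have live
 * translations in the TLB; flush_tlb_all() recycles them once the
 * whole TLB has been purged.  alloc_sid() returns the SID already
 * shifted into position (index << SPACEID_SHIFT).
 */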
unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
}
#endif