/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/bootinfo.h>

#include "mem_pieces.h"
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
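
/*
 * Worked example (assuming the usual 32-bit KERNELBASE of 0xC0000000):
 * 0xF0000000 - 0xC0000000 = 0x30000000, i.e. at most 768MB of lowmem can
 * be mapped below the fixed I/O and vmalloc area; any RAM beyond that
 * must be configured as highmem.
 */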
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long total_memory;
unsigned long total_lowmem;

unsigned long ppc_memstart;
unsigned long ppc_memoffset = PAGE_OFFSET;

int init_bootmem_done;
int boot_mapsize;

extern char etext[], _stext[];
extern char __init_begin, __init_end;
extern char *klimit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
#endif

void set_phys_avail(unsigned long total_ram);

/* XXX should be in current.h -- paulus */
extern struct task_struct *current_set[NR_CPUS];

struct mem_pieces phys_avail;

/*
 * This tells the system to map all of RAM with the segregs
 * (i.e. page tables) instead of the BATs.
 */
int __map_without_bats;
int __map_without_ltlbs;

/* max amount of RAM to use */
unsigned long __max_memory;
/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;
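
/*
 * Note: __map_without_bats, __map_without_ltlbs and __max_memory are
 * tuning knobs filled in from the kernel command line ("nobats",
 * "noltlbs", "mem=") by the command-line parsing below, before
 * MMU_init() consults them.
 */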
void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;

	printk("Mem-info:\n");
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageHighMem(mem_map + i))
			highmem++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (PageSwapCache(mem_map + i))
			cached++;
		else if (!page_count(mem_map + i))
			free++;
		else
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d pages of HIGHMEM\n", highmem);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
/* Free up now-unused memory */
static void free_sec(unsigned long start, unsigned long end, const char *name)
{
	unsigned long cnt = 0;

	while (start < end) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		cnt++;
		start += PAGE_SIZE;
	}
	if (cnt) {
		printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
		totalram_pages += cnt;
	}
}
void free_initmem(void)
{
#define FREESEC(TYPE) \
	free_sec((unsigned long)(&__ ## TYPE ## _begin), \
		 (unsigned long)(&__ ## TYPE ## _end), \
		 #TYPE);
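
	/*
	 * For reference, FREESEC(init) expands to
	 *   free_sec((unsigned long)(&__init_begin),
	 *            (unsigned long)(&__init_end), "init");
	 * i.e. one call per linker-defined section, with the stringized
	 * section name passed through for the printk in free_sec() above.
	 */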
160 printk ("Freeing unused kernel memory:");
163 ppc_md
.progress
= NULL
;
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif
/*
 * Check for command-line options that affect what MMU_init will do.
 */
void MMU_setup(void)
{
	/* Check for nobats option (used in mapin_ram). */
	if (strstr(cmd_line, "nobats")) {
		__map_without_bats = 1;
	}

	if (strstr(cmd_line, "noltlbs")) {
		__map_without_ltlbs = 1;
	}

	/* Look for mem= option on command line */
	if (strstr(cmd_line, "mem=")) {
		char *p, *q;
		unsigned long maxmem = 0;

		for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
			q = p + 4;
			if (p > cmd_line && p[-1] != ' ')
				continue;
			maxmem = simple_strtoul(q, &q, 0);
			if (*q == 'k' || *q == 'K') {
				maxmem <<= 10;
				++q;
			} else if (*q == 'm' || *q == 'M') {
				maxmem <<= 20;
				++q;
			}
		}
		__max_memory = maxmem;
	}
}
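
/*
 * Worked example: booting with "mem=64M" makes simple_strtoul() return 64,
 * the 'M' suffix shifts it to 64 << 20 = 0x04000000, so __max_memory ends
 * up at 64MB; MMU_init() below then clips total_memory to that value.
 */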
/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */
void __init MMU_init(void)
{
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);

	/* parse args from command line */
	MMU_setup();

	/*
	 * Figure out how much memory we have, how much
	 * is lowmem, and how much is highmem.  If we were
	 * passed the total memory size from the bootloader,
	 * just use it.
	 */
	if (boot_mem_size)
		total_memory = boot_mem_size;
	else
		total_memory = ppc_md.find_end_of_memory();

	if (__max_memory && total_memory > __max_memory)
		total_memory = __max_memory;
	total_lowmem = total_memory;
#ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can
	 * map in the fixed entries. */
	adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */
	if (total_lowmem > __max_low_memory) {
		total_lowmem = __max_low_memory;
#ifndef CONFIG_HIGHMEM
		total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
	}
	set_phys_avail(total_lowmem);
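
	/*
	 * At this point total_lowmem is what gets mapped permanently at
	 * KERNELBASE; with CONFIG_HIGHMEM any RAM between total_lowmem and
	 * total_memory is left for the highmem zone set up in paging_init()
	 * and freed into the page allocator in mem_init() below.
	 */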
	/* Initialize the MMU hardware */
	if (ppc_md.progress)
		ppc_md.progress("MMU:hw init", 0x300);
	MMU_init_hw();

	/* Map in all of RAM starting at KERNELBASE */
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin", 0x301);
	mapin_ram();

#ifdef CONFIG_HIGHMEM
	ioremap_base = PKMAP_BASE;
#else
	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM */
	ioremap_bot = ioremap_base;
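
	/*
	 * Sketch of the usual ppc32 layout: ioremap_bot starts at
	 * ioremap_base and is moved downward as ioremap() hands out virtual
	 * space, so I/O mappings grow down from here toward the top of
	 * lowmem (the allocation itself lives in the pgtable/ioremap code).
	 */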
	/* Map in I/O resources */
	if (ppc_md.progress)
		ppc_md.progress("MMU:setio", 0x302);
	if (ppc_md.setup_io_mappings)
		ppc_md.setup_io_mappings();

	/* Initialize the context management stuff */
	mmu_context_init();

	if (ppc_md.progress)
		ppc_md.progress("MMU:exit", 0x211);

#ifdef CONFIG_BOOTX_TEXT
	/* By default, we are no longer mapped */
	boot_text_mapped = 0;
	/* Must be done last, or ppc_md.progress will die. */
	map_boot_text();
#endif
}
/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
	void *p;

	if (init_bootmem_done) {
		p = alloc_bootmem_pages(PAGE_SIZE);
	} else {
		p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE);
	}
	return p;
}
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
void __init do_init_bootmem(void)
{
	unsigned long start, size;
	int i;

	/*
	 * Find an area to use for the bootmem bitmap.
	 * We look for the first area which is at least
	 * 128kB in length (128kB is enough for a bitmap
	 * for 4GB of memory, using 4kB pages), plus 1 page
	 * (in case the address isn't page-aligned).
	 */
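
	/*
	 * The arithmetic behind the "33 * PAGE_SIZE" test below: 4GB of
	 * 4kB pages is 2^20 pages, and at one bit per page the bitmap is
	 * 2^20 / 8 = 128kB = 32 pages; the extra page covers the case
	 * where the region does not start on a page boundary.
	 */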
	start = 0;
	size = 0;
	for (i = 0; i < phys_avail.n_regions; ++i) {
		unsigned long a = phys_avail.regions[i].address;
		unsigned long s = phys_avail.regions[i].size;

		if (s <= size)
			continue;
		start = a;
		size = s;
		if (s >= 33 * PAGE_SIZE)
			break;
	}
	start = PAGE_ALIGN(start);
	min_low_pfn = start >> PAGE_SHIFT;
	max_low_pfn = (PPC_MEMSTART + total_lowmem) >> PAGE_SHIFT;
	max_pfn = (PPC_MEMSTART + total_memory) >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(&contig_page_data, min_low_pfn,
					 PPC_MEMSTART >> PAGE_SHIFT,
					 max_low_pfn);

	/* remove the bootmem bitmap from the available memory */
	mem_pieces_remove(&phys_avail, start, boot_mapsize, 1);
	/* add everything in phys_avail into the bootmem map */
	for (i = 0; i < phys_avail.n_regions; ++i)
		free_bootmem(phys_avail.regions[i].address,
			     phys_avail.regions[i].size);

	init_bootmem_done = 1;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long start_pfn, end_pfn;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
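
	/*
	 * The two map_page() calls above mainly ensure a page table exists
	 * for the PKMAP and fixed-kmap virtual ranges; kmap_pte and
	 * kmap_prot are then the PTE pointer and protection that the ppc32
	 * highmem kmap_atomic() path fills in when a highmem page is mapped.
	 */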
	/* All pages are DMA-able so we put them all in the DMA zone. */
	start_pfn = __pa(PAGE_OFFSET) >> PAGE_SHIFT;
	end_pfn = start_pfn + (total_memory >> PAGE_SHIFT);
	add_active_range(0, start_pfn, end_pfn);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = total_memory >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = total_memory >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */
	free_area_init_nodes(max_zone_pfns);
}
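
/*
 * Zone sizing example (hypothetical numbers, RAM starting at physical 0):
 * with 1GB of RAM and total_lowmem capped at 768MB, ZONE_DMA ends at
 * pfn 0x30000 (768MB / 4kB) and ZONE_HIGHMEM at pfn 0x40000; without
 * CONFIG_HIGHMEM the same machine is simply truncated to 768MB back in
 * MMU_init().
 */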
void __init mem_init(void)
{
	unsigned long addr;
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
#ifdef CONFIG_HIGHMEM
	unsigned long highmem_mapnr;

	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */
	max_mapnr = total_memory >> PAGE_SHIFT;

	high_memory = (void *) __va(PPC_MEMSTART + total_lowmem);
	num_physpages = max_mapnr;	/* RAM is assumed contiguous */

	totalram_pages += free_all_bootmem();
#ifdef CONFIG_BLK_DEV_INITRD
	/* if we are booted from BootX with an initial ramdisk,
	   make sure the ramdisk pages aren't reserved. */
	if (initrd_start) {
		for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
			ClearPageReserved(virt_to_page(addr));
	}
#endif /* CONFIG_BLK_DEV_INITRD */
	for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
	     addr += PAGE_SIZE) {
		if (!PageReserved(virt_to_page(addr)))
			continue;
		if (addr < (ulong) etext)
			codepages++;
		else if (addr >= (unsigned long)&__init_begin
			 && addr < (unsigned long)&__init_end)
			initpages++;
		else if (addr < (ulong) klimit)
			datapages++;
	}
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn;

		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = mem_map + pfn;

			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
	}
#endif /* CONFIG_HIGHMEM */
443 printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
444 (unsigned long)nr_free_pages()<< (PAGE_SHIFT
-10),
445 codepages
<< (PAGE_SHIFT
-10), datapages
<< (PAGE_SHIFT
-10),
446 initpages
<< (PAGE_SHIFT
-10),
447 (unsigned long) (totalhigh_pages
<< (PAGE_SHIFT
-10)));
/*
 * Set phys_avail to the amount of physical memory,
 * less the kernel text/data/bss.
 */
void __init
set_phys_avail(unsigned long total_memory)
{
	unsigned long kstart, ksize;

	/*
	 * Initially, available physical memory is equivalent to all
	 * physical memory.
	 */
	phys_avail.regions[0].address = PPC_MEMSTART;
	phys_avail.regions[0].size = total_memory;
	phys_avail.n_regions = 1;

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(_stext);	/* should be 0 */
	ksize = PAGE_ALIGN(klimit - _stext);

	mem_pieces_remove(&phys_avail, kstart, ksize, 0);
	mem_pieces_remove(&phys_avail, 0, 0x4000, 0);
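
	/*
	 * The second remove also drops the first 0x4000 bytes (16kB) of
	 * physical memory from the free pool even though they are not part
	 * of the kernel image; the exception vectors and other early fixed
	 * code live in those low pages on 32-bit PowerPC, which is
	 * presumably why they are kept out of the allocator here.
	 */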
#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		mem_pieces_remove(&phys_avail, __pa(initrd_start),
				  initrd_end - initrd_start, 1);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
/* Mark some memory as reserved by removing it from phys_avail. */
void __init reserve_phys_mem(unsigned long start, unsigned long size)
{
	mem_pieces_remove(&phys_avail, start, size, 1);
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	clear_bit(PG_arch_1, &page->flags);
}
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);
	clear_bit(PG_arch_1, &pg->flags);
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);
	clear_bit(PG_arch_1, &pg->flags);
}
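
/*
 * In both helpers above, clearing PG_arch_1 marks the page "i-cache dirty"
 * again: the kernel has just written to it, so update_mmu_cache() below
 * will flush the d-cache/i-cache pair before the page is handed to user
 * space (see the i-cache coherency handling further down).
 */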
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
	/* handle i-cache coherency */
	unsigned long pfn = pte_pfn(pte);

	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
		/* On 8xx, the TLB handlers work in 2 stages:
		 * First, a zeroed entry is loaded by the TLBMiss handler,
		 * which causes the TLBError handler to be triggered.
		 * That means the zeroed TLB has to be invalidated
		 * whenever a page miss occurs.
		 */
		_tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm)
				__flush_dcache_icache((void *) address);
			else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (Hash != 0 && pte_young(pte)) {
		struct mm_struct *mm;
		pmd_t *pmd;

		mm = (address < TASK_SIZE) ? vma->vm_mm : &init_mm;
		pmd = pmd_offset(pgd_offset(mm, address), address);
		if (!pmd_none(*pmd))
			add_hash_page(mm->context.id, address, pmd_val(*pmd));
	}
#endif
}
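
/*
 * The add_hash_page() call above is the "preload" mentioned in the comment
 * before update_mmu_cache(): it installs a hash-table entry (HPTE) for the
 * PTE the fault handler just wrote, so the access that faulted can be
 * satisfied by the hardware hash lookup without taking another fault.
 */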
/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
{
	return pfn < max_pfn;
}
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);