/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags); \
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
        unsigned int order;
        unsigned long size;
        struct page *page;

        /* Allocate one zero page per cache colour when virtual coherency
           exceptions are possible, otherwise a single page is enough. */
        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
                SetPageReserved(page);
                page++;
        }

        size = PAGE_SIZE << order;
        zero_page_mask = (size - 1) & PAGE_MASK;

        return 1UL << order;
}
/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as the hint.
 */

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
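/*
 * With SMTC each CPU gets its own group of FIX_N_COLOURS coherent-kmap
 * slots; the PTE backing the first slot is cached at init time and then
 * indexed per CPU in kmap_coherent() below.
 */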
#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif
void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        inc_preempt_count();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
        idx += FIX_N_COLOURS * smp_processor_id();
#endif
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
        entrylo = pte.pte_high;
#else
        entrylo = pte_val(pte) >> 6;
#endif

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
        /* preload TLB instead of local_flush_tlb_one() */
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        tlbidx = read_c0_index();
        mtc0_tlbw_hazard();
        if (tlbidx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
#else
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
#endif
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);

        return (void *) vaddr;
}
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
        unsigned int wired;
        unsigned long flags, old_ctx;

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
#endif
        dec_preempt_count();
        preempt_check_resched();
}
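/*
 * Copy a user highmem page.  When the source could alias in the data cache
 * with its user mapping, it is accessed through a coherent kmap slot of the
 * right colour; otherwise a plain kmap_atomic() is used.
 */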
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);
        if (cpu_has_dc_aliases &&
            page_mapped(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }
        if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}

EXPORT_SYMBOL(copy_user_highpage);
void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

EXPORT_SYMBOL(copy_to_user_page);
void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}

EXPORT_SYMBOL(copy_from_user_page);
#ifdef CONFIG_HIGHMEM
unsigned long highstart_pfn, highend_pfn;

pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */
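/*
 * Allocate and wire up the page-table pages backing the fixmap range
 * [start, end).  Only needed when highmem or SMTC coherent kmaps are in
 * use; otherwise this compiles to an empty function.
 */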
void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        if (pte != pte_offset_kernel(pmd, 0))
                                                BUG();
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}
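/*
 * Flat (non-NUMA) memory initialisation.  page_is_ram() consults the
 * boot_mem_map built up by the platform code during early boot.
 */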
#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        /* not usable memory */
                        continue;

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}
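/*
 * Set up the zone pfn limits and hand them to the generic allocator.
 * Highmem is ignored on CPUs with dcache aliasing (see the warning below).
 */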
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long lastpfn;

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
        kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        lastpfn = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
                lastpfn = max_low_pfn;
        }
#endif

        free_area_init_nodes(max_zone_pfns);
}
static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;
        unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
        max_mapnr = highend_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        totalram_pages += free_all_bootmem();
        totalram_pages -= setup_zero_pages();   /* Setup zeroed pages.  */

        reservedpages = ram = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                if (page_is_ram(tmp)) {
                        ram++;
                        if (PageReserved(pfn_to_page(tmp)))
                                reservedpages++;
                }
        num_physpages = ram;

#ifdef CONFIG_HIGHMEM
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!page_is_ram(tmp)) {
                        SetPageReserved(page);
                        continue;
                }
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                totalhigh_pages++;
        }
        totalram_pages += totalhigh_pages;
        num_physpages += totalhigh_pages;
#endif

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow.  */
                kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END - VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
               "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               ram << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
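/*
 * Return a physical range of pages to the allocator, poisoning the freed
 * memory first so stale uses of init data are easier to catch.
 */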
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                ClearPageReserved(page);
                init_page_count(page);
                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                __free_page(page);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
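/* Release the memory occupied by an initial ramdisk once it is no longer needed. */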
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory",
                        virt_to_phys((void *)start),
                        virt_to_phys((void *)end));
}
#endif
void __init_refok free_initmem(void)
{
        prom_free_prom_memory();
        free_init_pages("unused kernel memory",
                        __pa_symbol(&__init_begin),
                        __pa_symbol(&__init_end));
}
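/*
 * Per-CPU pointer to the active page global directory, kept here so the
 * low-level TLB refill handlers can find it quickly.
 */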
unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * will officially be retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifdef CONFIG_64BIT
#ifdef MODULE_START
pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);