The LDT fixes in particular fix some potentially random strange behaviour.
[davej-history.git] / arch / alpha / mm / init.c
blob3b0ae701122e337b5d26bb2e9ad35b8975cb1a2b
1 /*
2 * linux/arch/alpha/mm/init.c
4 * Copyright (C) 1995 Linus Torvalds
5 */
7 /* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
9 #include <linux/config.h>
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/string.h>
15 #include <linux/types.h>
16 #include <linux/ptrace.h>
17 #include <linux/mman.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/init.h>
21 #include <linux/bootmem.h> /* max_low_pfn */
22 #include <linux/vmalloc.h>
23 #ifdef CONFIG_BLK_DEV_INITRD
24 #include <linux/blk.h>
25 #endif
27 #include <asm/system.h>
28 #include <asm/uaccess.h>
29 #include <asm/pgtable.h>
30 #include <asm/pgalloc.h>
31 #include <asm/hwrpb.h>
32 #include <asm/dma.h>
33 #include <asm/mmu_context.h>
34 #include <asm/console.h>
36 static unsigned long totalram_pages;
38 extern void die_if_kernel(char *,struct pt_regs *,long);
40 struct thread_struct original_pcb;
42 #ifndef CONFIG_SMP
43 struct pgtable_cache_struct quicklists;
44 #endif
46 void
47 __bad_pmd(pgd_t *pgd)
49 printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
50 pgd_set(pgd, BAD_PAGETABLE);
53 void
54 __bad_pte(pmd_t *pmd)
56 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
57 pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
60 pgd_t *
61 get_pgd_slow(void)
63 pgd_t *ret, *init;
65 ret = (pgd_t *)__get_free_page(GFP_KERNEL);
66 init = pgd_offset(&init_mm, 0UL);
67 if (ret) {
68 clear_page(ret);
69 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
70 memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
71 (PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
72 #else
73 pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
74 #endif
76 /* The last PGD entry is the VPTB self-map. */
77 pgd_val(ret[PTRS_PER_PGD-1])
78 = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
80 return ret;
83 pmd_t *
84 get_pmd_slow(pgd_t *pgd, unsigned long offset)
86 pmd_t *pmd;
88 pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
89 if (pgd_none(*pgd)) {
90 if (pmd) {
91 clear_page((void *)pmd);
92 pgd_set(pgd, pmd);
93 return pmd + offset;
95 pgd_set(pgd, BAD_PAGETABLE);
96 return NULL;
98 free_page((unsigned long)pmd);
99 if (pgd_bad(*pgd)) {
100 __bad_pmd(pgd);
101 return NULL;
103 return (pmd_t *) pgd_page(*pgd) + offset;
106 pte_t *
107 get_pte_slow(pmd_t *pmd, unsigned long offset)
109 pte_t *pte;
111 pte = (pte_t *) __get_free_page(GFP_KERNEL);
112 if (pmd_none(*pmd)) {
113 if (pte) {
114 clear_page((void *)pte);
115 pmd_set(pmd, pte);
116 return pte + offset;
118 pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
119 return NULL;
121 free_page((unsigned long)pte);
122 if (pmd_bad(*pmd)) {
123 __bad_pte(pmd);
124 return NULL;
126 return (pte_t *) pmd_page(*pmd) + offset;
129 int do_check_pgt_cache(int low, int high)
131 int freed = 0;
132 if(pgtable_cache_size > high) {
133 do {
134 if(pgd_quicklist)
135 free_pgd_slow(get_pgd_fast()), freed++;
136 if(pmd_quicklist)
137 free_pmd_slow(get_pmd_fast()), freed++;
138 if(pte_quicklist)
139 free_pte_slow(get_pte_fast()), freed++;
140 } while(pgtable_cache_size > low);
142 return freed;
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
158 pmd_t *
159 __bad_pagetable(void)
161 memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
162 return (pmd_t *) EMPTY_PGT;
165 pte_t
166 __bad_page(void)
168 memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
169 return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
172 void
173 show_mem(void)
175 long i,free = 0,total = 0,reserved = 0;
176 long shared = 0, cached = 0;
178 printk("\nMem-info:\n");
179 show_free_areas();
180 printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
181 i = max_mapnr;
182 while (i-- > 0) {
183 total++;
184 if (PageReserved(mem_map+i))
185 reserved++;
186 else if (PageSwapCache(mem_map+i))
187 cached++;
188 else if (!page_count(mem_map+i))
189 free++;
190 else
191 shared += atomic_read(&mem_map[i].count) - 1;
193 printk("%ld pages of RAM\n",total);
194 printk("%ld free pages\n",free);
195 printk("%ld reserved pages\n",reserved);
196 printk("%ld pages shared\n",shared);
197 printk("%ld pages swap cached\n",cached);
198 printk("%ld pages in page table cache\n",pgtable_cache_size);
199 show_buffers();
202 static inline unsigned long
203 load_PCB(struct thread_struct * pcb)
205 register unsigned long sp __asm__("$30");
206 pcb->ksp = sp;
207 return __reload_thread(pcb);
210 /* Set up initial PCB, VPTB, and other such nicities. */
212 static inline void
213 switch_to_system_map(void)
215 unsigned long newptbr;
216 unsigned long original_pcb_ptr;
218 /* Initialize the kernel's page tables. Linux puts the vptb in
219 the last slot of the L1 page table. */
220 memset(swapper_pg_dir, 0, PAGE_SIZE);
221 newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
222 pgd_val(swapper_pg_dir[1023]) =
223 (newptbr << 32) | pgprot_val(PAGE_KERNEL);
225 /* Set the vptb. This is often done by the bootloader, but
226 shouldn't be required. */
227 if (hwrpb->vptb != 0xfffffffe00000000) {
228 wrvptptr(0xfffffffe00000000);
229 hwrpb->vptb = 0xfffffffe00000000;
230 hwrpb_update_checksum(hwrpb);
233 /* Also set up the real kernel PCB while we're at it. */
234 init_task.thread.ptbr = newptbr;
235 init_task.thread.pal_flags = 1; /* set FEN, clear everything else */
236 init_task.thread.flags = 0;
237 original_pcb_ptr = load_PCB(&init_task.thread);
238 tbia();
240 /* Save off the contents of the original PCB so that we can
241 restore the original console's page tables for a clean reboot.
243 Note that the PCB is supposed to be a physical address, but
244 since KSEG values also happen to work, folks get confused.
245 Check this here. */
247 if (original_pcb_ptr < PAGE_OFFSET) {
248 original_pcb_ptr = (unsigned long)
249 phys_to_virt(original_pcb_ptr);
251 original_pcb = *(struct thread_struct *) original_pcb_ptr;
254 int callback_init_done;
256 void * __init
257 callback_init(void * kernel_end)
259 struct crb_struct * crb;
260 pgd_t *pgd;
261 pmd_t *pmd;
262 void *two_pages;
264 /* Starting at the HWRPB, locate the CRB. */
265 crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);
267 if (alpha_using_srm) {
268 /* Tell the console whither it is to be remapped. */
269 if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
270 __halt(); /* "We're boned." --Bender */
272 /* Edit the procedure descriptors for DISPATCH and FIXUP. */
273 crb->dispatch_va = (struct procdesc_struct *)
274 (VMALLOC_START + (unsigned long)crb->dispatch_va
275 - crb->map[0].va);
276 crb->fixup_va = (struct procdesc_struct *)
277 (VMALLOC_START + (unsigned long)crb->fixup_va
278 - crb->map[0].va);
281 switch_to_system_map();
283 /* Allocate one PGD and one PMD. In the case of SRM, we'll need
284 these to actually remap the console. There is an assumption
285 here that only one of each is needed, and this allows for 8MB.
286 Currently (late 1999), big consoles are still under 4MB.
288 In the case of not SRM, but not CONFIG_ALPHA_LARGE_VMALLOC,
289 we need to allocate the PGD we use for vmalloc before we start
290 forking other tasks. */
292 two_pages = (void *)
293 (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
294 kernel_end = two_pages + 2*PAGE_SIZE;
295 memset(two_pages, 0, 2*PAGE_SIZE);
297 pgd = pgd_offset_k(VMALLOC_START);
298 pgd_set(pgd, (pmd_t *)two_pages);
299 pmd = pmd_offset(pgd, VMALLOC_START);
300 pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
302 if (alpha_using_srm) {
303 static struct vm_struct console_remap_vm;
304 unsigned long vaddr = VMALLOC_START;
305 long i, j;
307 /* Set up the third level PTEs and update the virtual
308 addresses of the CRB entries. */
309 for (i = 0; i < crb->map_entries; ++i) {
310 unsigned long paddr = crb->map[i].pa;
311 crb->map[i].va = vaddr;
312 for (j = 0; j < crb->map[i].count; ++j) {
313 set_pte(pte_offset(pmd, vaddr),
314 mk_pte_phys(paddr, PAGE_KERNEL));
315 paddr += PAGE_SIZE;
316 vaddr += PAGE_SIZE;
320 /* Let vmalloc know that we've allocated some space. */
321 console_remap_vm.flags = VM_ALLOC;
322 console_remap_vm.addr = VMALLOC_START;
323 console_remap_vm.size = vaddr - VMALLOC_START;
324 vmlist = &console_remap_vm;
327 callback_init_done = 1;
328 return kernel_end;
333 * paging_init() sets up the memory map.
335 void
336 paging_init(void)
338 unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
339 unsigned long dma_pfn, high_pfn;
341 dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
342 high_pfn = max_low_pfn;
344 #define ORDER_MASK (~((1L << (MAX_ORDER-1))-1))
345 #define ORDER_ALIGN(n) (((n) + ~ORDER_MASK) & ORDER_MASK)
347 dma_pfn = ORDER_ALIGN(dma_pfn);
348 high_pfn = ORDER_ALIGN(high_pfn);
350 #undef ORDER_MASK
351 #undef ORDER_ALIGN
353 if (dma_pfn > high_pfn)
354 zones_size[ZONE_DMA] = high_pfn;
355 else {
356 zones_size[ZONE_DMA] = dma_pfn;
357 zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
360 /* Initialize mem_map[]. */
361 free_area_init(zones_size);
363 /* Initialize the kernel's ZERO_PGE. */
364 memset((void *)ZERO_PGE, 0, PAGE_SIZE);
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
/*
 * Undo switch_to_system_map() before handing control back to the SRM
 * console: put the vptb where SRM expects it and reload the console's
 * saved PCB (and thus its page tables).
 */
void
srm_paging_stop (void)
{
	/* Move the vptb back to where the SRM console expects it. */
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000);
	hwrpb->vptb = 0x200000000;
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use. */
	load_PCB(&original_pcb);
	tbia();
}
#endif
384 static void __init
385 printk_memory_info(void)
387 unsigned long codesize, reservedpages, datasize, initsize, tmp;
388 extern int page_is_ram(unsigned long) __init;
389 extern char _text, _etext, _data, _edata;
390 extern char __init_begin, __init_end;
392 /* printk all informations */
393 reservedpages = 0;
394 for (tmp = 0; tmp < max_low_pfn; tmp++)
396 * Only count reserved RAM pages
398 if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
399 reservedpages++;
401 codesize = (unsigned long) &_etext - (unsigned long) &_text;
402 datasize = (unsigned long) &_edata - (unsigned long) &_data;
403 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
405 printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
406 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
407 max_mapnr << (PAGE_SHIFT-10),
408 codesize >> 10,
409 reservedpages << (PAGE_SHIFT-10),
410 datasize >> 10,
411 initsize >> 10);
414 void __init
415 mem_init(void)
417 max_mapnr = num_physpages = max_low_pfn;
418 totalram_pages += free_all_bootmem();
419 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
421 printk_memory_info();
424 void
425 free_initmem (void)
427 extern char __init_begin, __init_end;
428 unsigned long addr;
430 addr = (unsigned long)(&__init_begin);
431 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
432 ClearPageReserved(virt_to_page(addr));
433 set_page_count(virt_to_page(addr), 1);
434 free_page(addr);
435 totalram_pages++;
437 printk ("Freeing unused kernel memory: %ldk freed\n",
438 (&__init_end - &__init_begin) >> 10);
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Release the pages of the initial ramdisk back to the allocator once
 * it is no longer needed.
 */
void
free_initrd_mem(unsigned long start, unsigned long end)
{
	/* Remember the size now: the loop below advances 'start' to
	   'end', so computing (end - start) afterwards would always
	   report 0k (or less) freed.  */
	unsigned long nbytes = end - start;

	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
	printk ("Freeing initrd memory: %ldk freed\n", nbytes >> 10);
}
#endif
455 void
456 si_meminfo(struct sysinfo *val)
458 val->totalram = totalram_pages;
459 val->sharedram = 0;
460 val->freeram = nr_free_pages();
461 val->bufferram = atomic_read(&buffermem_pages);
462 val->totalhigh = 0;
463 val->freehigh = 0;
464 val->mem_unit = PAGE_SIZE;