/*
 *  linux/arch/alpha/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
#include <linux/pagemap.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/vmalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>

#include <asm/tlb.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void die_if_kernel(char *,struct pt_regs *,long);

static struct pcb_struct original_pcb;
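/* A note on pgd_alloc() below: only the kernel slots above
   USER_PTRS_PER_PGD are copied from init_mm, and the final L1 slot is
   pointed back at the new PGD page itself.  On Alpha this self-map is
   presumably what lets the recursive virtual page table (VPTB) walk
   resolve addresses in the new mm without further setup; the user
   slots start out empty and are filled on demand.  */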
pgd_t *
pgd_alloc(struct mm_struct *mm)
{
        pgd_t *ret, *init;

        ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        init = pgd_offset(&init_mm, 0UL);
        if (ret) {
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
                memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
                        (PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
                pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

                /* The last PGD entry is the VPTB self-map. */
                pgd_val(ret[PTRS_PER_PGD-1])
                  = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
        }
        return ret;
}
pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        return pte;
}
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pmd_t *
__bad_pagetable(void)
{
        memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
        return (pmd_t *) EMPTY_PGT;
}

pte_t
__bad_page(void)
{
        memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
        return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}
#ifndef CONFIG_DISCONTIGMEM
void
show_mem(void)
{
        long i,free = 0,total = 0,reserved = 0;
        long shared = 0, cached = 0;

        printk("\nMem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (!page_count(mem_map+i))
                        free++;
                else
                        shared += page_count(mem_map + i) - 1;
        }
        printk("%ld pages of RAM\n",total);
        printk("%ld free pages\n",free);
        printk("%ld reserved pages\n",reserved);
        printk("%ld pages shared\n",shared);
        printk("%ld pages swap cached\n",cached);
}
#endif
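/* A brief note on load_PCB(): register $30 is the Alpha stack pointer,
   so the asm binding simply captures the current ksp for the new PCB.
   __reload_thread() then switches to that PCB, presumably via the
   swpctx PALcall, and returns the (physical) address of the PCB that
   was in use before -- which is how the original console PCB is
   recovered further down.  */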
static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
        register unsigned long sp __asm__("$30");
        pcb->ksp = sp;
        return __reload_thread(pcb);
}
/* Set up initial PCB, VPTB, and other such nicities.  */

static inline void
switch_to_system_map(void)
{
        unsigned long newptbr;
        unsigned long original_pcb_ptr;

        /* Initialize the kernel's page tables.  Linux puts the vptb in
           the last slot of the L1 page table.  */
        memset(swapper_pg_dir, 0, PAGE_SIZE);
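        /* The L1 entry written below is in Alpha PTE format: the PFN of
           swapper_pg_dir goes in the upper 32 bits and the protection
           bits come from PAGE_KERNEL.  Slot 1023 thus maps the page
           table onto itself, which is presumably what makes the
           virtual page table at the VPTB address work.  */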
        newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
        pgd_val(swapper_pg_dir[1023]) =
                (newptbr << 32) | pgprot_val(PAGE_KERNEL);

        /* Set the vptb.  This is often done by the bootloader, but
           shouldn't be required.  */
        if (hwrpb->vptb != 0xfffffffe00000000UL) {
                wrvptptr(0xfffffffe00000000UL);
                hwrpb->vptb = 0xfffffffe00000000UL;
                hwrpb_update_checksum(hwrpb);
        }

        /* Also set up the real kernel PCB while we're at it.  */
        init_thread_info.pcb.ptbr = newptbr;
        init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
        original_pcb_ptr = load_PCB(&init_thread_info.pcb);
        tbia();

        /* Save off the contents of the original PCB so that we can
           restore the original console's page tables for a clean reboot.

           Note that the PCB is supposed to be a physical address, but
           since KSEG values also happen to work, folks get confused.
           Check this here.  */

        if (original_pcb_ptr < PAGE_OFFSET) {
                original_pcb_ptr = (unsigned long)
                        phys_to_virt(original_pcb_ptr);
        }
        original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}
int callback_init_done;
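/* Overview: callback_init() relocates the SRM console callback area
   (the CRB map and its DISPATCH/FIXUP procedure descriptors) to the
   bottom of the vmalloc region, so the console can still be called
   once the kernel stops running on the console's own page tables.  */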
void * __init
callback_init(void * kernel_end)
{
        struct crb_struct * crb;
        pgd_t *pgd;
        pmd_t *pmd;
        void *two_pages;

        /* Starting at the HWRPB, locate the CRB. */
        crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

        if (alpha_using_srm) {
                /* Tell the console whither it is to be remapped. */
                if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
                        __halt();	/* "We're boned."  --Bender */

                /* Edit the procedure descriptors for DISPATCH and FIXUP. */
                crb->dispatch_va = (struct procdesc_struct *)
                        (VMALLOC_START + (unsigned long)crb->dispatch_va
                         - crb->map[0].va);
                crb->fixup_va = (struct procdesc_struct *)
                        (VMALLOC_START + (unsigned long)crb->fixup_va
                         - crb->map[0].va);
        }

        switch_to_system_map();
        /* Allocate one PGD and one PMD.  In the case of SRM, we'll need
           these to actually remap the console.  There is an assumption
           here that only one of each is needed, and this allows for 8MB.
           On systems with larger consoles, additional pages will be
           allocated as needed during the mapping process.

           Even without SRM, when CONFIG_ALPHA_LARGE_VMALLOC is not set
           we need to allocate the PGD we use for vmalloc before we start
           forking other tasks.  */
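        /* Arithmetic behind the 8MB figure above: with Alpha's 8KB pages
           and 8-byte PTEs, one PTE page holds 8192/8 = 1024 entries, and
           1024 * 8KB = 8MB of console mappings before another PTE page
           is needed.  */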
        two_pages = (void *)
          (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
        kernel_end = two_pages + 2*PAGE_SIZE;
        memset(two_pages, 0, 2*PAGE_SIZE);

        pgd = pgd_offset_k(VMALLOC_START);
        pgd_set(pgd, (pmd_t *)two_pages);
        pmd = pmd_offset(pgd, VMALLOC_START);
        pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
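        /* At this point the first of the two pages is wired in as the
           PMD for the vmalloc region and the second as its first PTE
           page.  Since a new mm only inherits this single vmalloc PGD
           slot when it is created (see pgd_alloc above), it has to
           exist before the first fork -- presumably why it is set up
           here even when SRM is not in use.  */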
        if (alpha_using_srm) {
                static struct vm_struct console_remap_vm;
                unsigned long vaddr = VMALLOC_START;
                unsigned long i, j;

                /* Set up the third level PTEs and update the virtual
                   addresses of the CRB entries.  */
                for (i = 0; i < crb->map_entries; ++i) {
                        unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
                        crb->map[i].va = vaddr;
                        for (j = 0; j < crb->map[i].count; ++j) {
                                /* Newer consoles (especially on larger
                                   systems) may require more pages of
                                   PTEs.  Grab additional pages as needed. */
                                if (pmd != pmd_offset(pgd, vaddr)) {
                                        memset(kernel_end, 0, PAGE_SIZE);
                                        pmd = pmd_offset(pgd, vaddr);
                                        pmd_set(pmd, (pte_t *)kernel_end);
                                        kernel_end += PAGE_SIZE;
                                }
                                set_pte(pte_offset_kernel(pmd, vaddr),
                                        pfn_pte(pfn, PAGE_KERNEL));
                                pfn++;
                                vaddr += PAGE_SIZE;
                        }
                }
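                /* console_remap_vm is static and is linked straight into
                   vmlist below rather than registered through
                   get_vm_area(), presumably because the vmalloc
                   allocator is not yet usable this early in boot; the
                   effect is simply to reserve [VMALLOC_START, vaddr)
                   so later vmalloc() calls do not hand it out again.  */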
                /* Let vmalloc know that we've allocated some space.  */
                console_remap_vm.flags = VM_ALLOC;
                console_remap_vm.addr = (void *) VMALLOC_START;
                console_remap_vm.size = vaddr - VMALLOC_START;
                vmlist = &console_remap_vm;
        }
        callback_init_done = 1;
        return kernel_end;
}
#ifndef CONFIG_DISCONTIGMEM
/*
 * paging_init() sets up the memory map.
 */
void __init
paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, };
        unsigned long dma_pfn, high_pfn;

        dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        high_pfn = max_pfn = max_low_pfn;
        if (dma_pfn >= high_pfn)
                zones_size[ZONE_DMA] = high_pfn;
        else {
                zones_size[ZONE_DMA] = dma_pfn;
                zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
        }

        /* Initialize mem_map[].  */
        free_area_init(zones_size);

        /* Initialize the kernel's ZERO_PGE. */
        memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
#endif /* CONFIG_DISCONTIGMEM */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
srm_paging_stop (void)
{
        /* Move the vptb back to where the SRM console expects it.  */
        swapper_pg_dir[1] = swapper_pg_dir[1023];
        tbia();
        wrvptptr(0x200000000UL);
        hwrpb->vptb = 0x200000000UL;
        hwrpb_update_checksum(hwrpb);

        /* Reload the page tables that the console had in use.  */
        load_PCB(&original_pcb);
        tbia();
}
#endif
#ifndef CONFIG_DISCONTIGMEM
static void __init
printk_memory_info(void)
{
        unsigned long codesize, reservedpages, datasize, initsize, tmp;
        extern int page_is_ram(unsigned long) __init;
        extern char _text, _etext, _data, _edata;
        extern char __init_begin, __init_end;

        /* printk all information */
        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
                        reservedpages++;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_data;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
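        /* The page counts below are converted to kilobytes by shifting
           left by PAGE_SHIFT-10; with Alpha's PAGE_SHIFT of 13 that is
           a shift of 3, i.e. 8 KB per page.  The byte-sized code, data
           and init totals are simply shifted right by 10 instead.  */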
        printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               max_mapnr << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10);
}
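/* Note on mem_init() below: free_all_bootmem() hands every page still
   owned by the boot-time allocator over to the buddy allocator and
   returns the number of pages released, which is why its result is
   added to totalram_pages.  */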
void __init
mem_init(void)
{
        max_mapnr = num_physpages = max_low_pfn;
        totalram_pages += free_all_bootmem();
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        printk_memory_info();
}
#endif /* CONFIG_DISCONTIGMEM */
void
free_reserved_mem(void *start, void *end)
{
        void *__start = start;
        for (; __start < end; __start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(__start));
                init_page_count(virt_to_page(__start));
                free_page((long)__start);
                totalram_pages++;
        }
}
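/* The sequence above is the usual recipe for giving reserved pages back
   to the page allocator: clear PG_reserved, reset the refcount to one
   with init_page_count(), then let free_page() drop that reference so
   the page enters the buddy lists.  Both free_initmem() and
   free_initrd_mem() below reuse it.  */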
void
free_initmem(void)
{
        extern char __init_begin, __init_end;

        free_reserved_mem(&__init_begin, &__init_end);
        printk ("Freeing unused kernel memory: %ldk freed\n",
                (&__init_end - &__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void
free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_mem((void *)start, (void *)end);
        printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif