// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
#include <linux/pagemap.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/memblock.h> /* max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/gfp.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/sections.h>

extern void die_if_kernel(char *, struct pt_regs *, long);

static struct pcb_struct original_pcb;
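
/*
 * Allocate a fresh page directory.  The kernel portion is shared with
 * init_mm by copying its L1 entries, and the last slot is pointed back
 * at the new PGD itself so the VPTB self-map stays intact for the new
 * process.
 */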
pgd_t *
pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	init = pgd_offset(&init_mm, 0UL);
	if (ret) {
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

		/* The last PGD entry is the VPTB self-map. */
		pgd_val(ret[PTRS_PER_PGD-1])
			= pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
	}
	return ret;
}

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pmd_t *
__bad_pagetable(void)
{
	memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
	return (pmd_t *) EMPTY_PGT;
}

pte_t
__bad_page(void)
{
	memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}
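
/*
 * Stash the current kernel stack pointer in the new PCB and switch to
 * it with the PALcode context-switch primitive; the return value is
 * the previous PCB's address, nominally physical (but see the KSEG
 * check in switch_to_system_map).
 */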
static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}

/* Set up the initial PCB, VPTB, and other such niceties.  */

static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

	/* Initialize the kernel's page tables.  Linux puts the vptb in
	   the last slot of the L1 page table. */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
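	/* newptbr is now the page frame number of swapper_pg_dir; an
	   Alpha PTE keeps the PFN in the upper 32 bits with protection
	   bits below, hence the shift in the self-map entry. */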
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

	/* Set the vptb.  This is often done by the bootloader, but
	   shouldn't be required. */
	if (hwrpb->vptb != 0xfffffffe00000000UL) {
		wrvptptr(0xfffffffe00000000UL);
		hwrpb->vptb = 0xfffffffe00000000UL;
		hwrpb_update_checksum(hwrpb);
	}

	/* Also set up the real kernel PCB while we're at it. */
	init_thread_info.pcb.ptbr = newptbr;
	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
	tbia();

	/* Save off the contents of the original PCB so that we can
	   restore the original console's page tables for a clean reboot.

	   Note that the PCB is supposed to be a physical address, but
	   since KSEG values also happen to work, folks get confused.
	   Check this here. */
	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}

int callback_init_done;

void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	void *two_pages;

	/* Starting at the HWRPB, locate the CRB. */
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);
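	/* The CRB describes the console's callback interface: procedure
	   descriptors for DISPATCH and FIXUP plus a map of the physical
	   pages the firmware occupies. */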

	if (alpha_using_srm) {
		/* Tell the console whither it is to be remapped. */
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();		/* "We're boned."  --Bender */

		/* Edit the procedure descriptors for DISPATCH and FIXUP. */
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
	   these to actually remap the console.  There is an assumption
	   here that only one of each is needed, and this allows for 8MB.
	   On systems with larger consoles, additional pages will be
	   allocated as needed during the mapping process.

	   When not using SRM, but also not CONFIG_ALPHA_LARGE_VMALLOC,
	   we need to allocate the PGD we use for vmalloc before we start
	   forking other tasks. */
	two_pages = (void *)
		(((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);
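
	/* The two freshly zeroed pages become the PMD and PTE levels for
	   VMALLOC_START; the p4d/pud steps below are folded on alpha's
	   three-level page tables. */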
	pgd = pgd_offset_k(VMALLOC_START);
	p4d = p4d_offset(pgd, VMALLOC_START);
	pud = pud_offset(p4d, VMALLOC_START);
	pud_set(pud, (pmd_t *)two_pages);
	pmd = pmd_offset(pud, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long nr_pages = 0;
		unsigned long vaddr;
		unsigned long i, j;

		/* Calculate the needed size. */
		for (i = 0; i < crb->map_entries; ++i)
			nr_pages += crb->map[i].count;

		/* Register the vm area. */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.size = nr_pages << PAGE_SHIFT;
		vm_area_register_early(&console_remap_vm, PAGE_SIZE);

		vaddr = (unsigned long)console_remap_vm.addr;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries. */
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
				/* Newer consoles (especially on larger
				   systems) may require more pages of
				   PTEs.  Grab additional pages as needed. */
				if (pmd != pmd_offset(pud, vaddr)) {
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pud, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset_kernel(pmd, vaddr),
					pfn_pte(pfn, PAGE_KERNEL));
				pfn++;
				vaddr += PAGE_SIZE;
			}
		}
	}

	callback_init_done = 1;
	return kernel_end;
}

#ifndef CONFIG_DISCONTIGMEM
/*
 * paging_init() sets up the memory map.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned long dma_pfn, high_pfn;

	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	high_pfn = max_pfn = max_low_pfn;
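
	/* Pages below MAX_DMA_ADDRESS make up ZONE_DMA; anything above
	   it goes to ZONE_NORMAL. */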
	if (dma_pfn >= high_pfn)
		zones_size[ZONE_DMA] = high_pfn;
	else {
		zones_size[ZONE_DMA] = dma_pfn;
		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
	}

	/* Initialize mem_map[]. */
	free_area_init(zones_size);

	/* Initialize the kernel's ZERO_PGE. */
	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
#endif /* CONFIG_DISCONTIGMEM */

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
srm_paging_stop (void)
{
	/* Move the vptb back to where the SRM console expects it. */
	swapper_pg_dir[1] = swapper_pg_dir[1023];
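	/* Slot 1 of the L1 table covers VA 0x200000000, so the self-map
	   entry now sits where the console expects its VPTB. */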
	tbia();
	wrvptptr(0x200000000UL);
	hwrpb->vptb = 0x200000000UL;
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use. */
	load_PCB(&original_pcb);
	tbia();
}
#endif

void __init
mem_init(void)
{
	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();
	mem_init_print_info(NULL);
}