// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>
#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
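
/*
 * Illustrative sketch (an assumption, not code from this file): callers
 * are expected to fold mm_cachebits into the protection bits when kernel
 * page descriptors are built, roughly along the lines of
 *
 *	pte_val(pte) = physaddr | _PAGE_PRESENT | _PAGE_ACCESSED | mm_cachebits;
 *
 * so 68040/060 kernels get copyback-cachable mappings while 68020/030
 * kernels add no extra bits.
 */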

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;
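
/*
 * Allocate a fresh page to hold a kernel page table. The page is
 * cleared, pushed out of the data cache, and mapped non-cachable:
 * on '040/'060 the hardware table walker fetches descriptors straight
 * from RAM, so page tables should not sit in a copyback-cached mapping.
 */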
static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!ptablep)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}
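
/*
 * Pointer tables are much smaller than a page (on the motorola MMU a
 * pointer table is 128 4-byte descriptors, i.e. 512 bytes), so several
 * of them are carved out of a single page. last_pgtable is the
 * allocation cursor within the current page; kernel_ptr_table() below
 * hands out tables sequentially and only allocates a fresh page once
 * the cursor crosses a page boundary.
 */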
static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
							   PAGE_SIZE);
		if (!last_pgtable)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}
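
/*
 * Map one memory chunk into the kernel virtual address space. Three
 * granularities are used: on 020/030, whole 32MB blocks can be mapped
 * by a single root-table descriptor ("very early termination") and
 * 256kB blocks by a single pointer-table descriptor ("early
 * termination"); on 040/060 every page gets its own page-table entry.
 */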
static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);
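
		/*
		 * With a pointer table in hand, either short-circuit the
		 * mapping with 020/030 early-termination descriptors, or
		 * (040/060) fill in individual page-table entries.
		 */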
		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}
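
	/*
	 * Register the usable memory chunks with memblock. Chunks that lie
	 * below the first one are dropped: m68k_memoffset, the linear
	 * phys-to-virt offset, is derived from the first chunk, so memory
	 * below it cannot be reached through the direct mapping.
	 */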
	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
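
	/*
	 * m68k_virt_to_node_shift (set below) sizes the address-to-node
	 * lookup so the whole [min_addr, max_addr) span collapses into at
	 * most 64 slots: fls(range - 1) - 6 is the smallest shift with
	 * (range >> shift) <= 64.
	 */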
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);
	}
}