// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/sparc/mm/init.c
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *  Copyright (C) 2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/initrd.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/gfp.h>

#include <asm/sections.h>
#include <asm/page.h>
#include <asm/vaddrs.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/leon.h>

#include "mm_32.h"

static unsigned long *sparc_valid_addr_bitmap;

unsigned long phys_base;
EXPORT_SYMBOL(phys_base);

unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];

/* Initial ramdisk setup */
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

unsigned long highstart_pfn, highend_pfn;

unsigned long last_valid_pfn;
unsigned long calc_highpages(void)
{
	int i;
	int nr = 0;

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;

		if (end_pfn <= max_low_pfn)
			continue;

		if (start_pfn < max_low_pfn)
			start_pfn = max_low_pfn;

		nr += end_pfn - start_pfn;
	}

	return nr;
}
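
/* Clamp the low-memory boundary to SRMMU_MAXMEM worth of pages above
 * pfn_base, backing off to the end of the previous bank if the next
 * bank starts beyond that limit.
 */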
static unsigned long calc_max_low_pfn(void)
{
	int i;
	unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
	unsigned long curr_pfn, last_pfn;

	last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;
	for (i = 1; sp_banks[i].num_bytes != 0; i++) {
		curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;

		if (curr_pfn >= tmp) {
			if (last_pfn < tmp)
				tmp = last_pfn;
			break;
		}

		last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
	}

	return tmp;
}
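
/* Locate and reserve the initial ramdisk handed over by the boot loader,
 * then translate initrd_start/initrd_end into kernel virtual addresses.
 */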
static void __init find_ramdisk(unsigned long end_of_phys_memory)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long size;

	/* Now have to check initial ramdisk, so that it won't pass
	 * the end of memory
	 */
	if (sparc_ramdisk_image) {
		if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
			sparc_ramdisk_image -= KERNBASE;
		initrd_start = sparc_ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		} else {
			/* Reserve the initrd image area. */
			size = initrd_end - initrd_start;
			memblock_reserve(initrd_start, size);

			initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
			initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
		}
	}
#endif
}
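
/* Register the physical memory banks with memblock, trim them to any
 * mem= command line limit, reserve the kernel image and the initrd,
 * and report the number of page frames available for allocation.
 */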
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long start_pfn, bytes_avail, size;
	unsigned long end_of_phys_memory = 0;
	unsigned long high_pages = 0;
	int i;

	memblock_set_bottom_up(true);
	memblock_allow_resize();

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					memblock_add(sp_banks[i].base_addr,
						     sp_banks[i].num_bytes);
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
		memblock_add(sp_banks[i].base_addr, sp_banks[i].num_bytes);
	}

	/* Start with page aligned address of last symbol in kernel
	 * image.
	 */
	start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));

	/* Now shift down to get the real physical page frame number. */
	start_pfn >>= PAGE_SHIFT;

	max_pfn = end_of_phys_memory >> PAGE_SHIFT;

	max_low_pfn = max_pfn;
	highstart_pfn = highend_pfn = max_pfn;

	if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
		highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
		max_low_pfn = calc_max_low_pfn();
		high_pages = calc_highpages();
		printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		       high_pages >> (20 - PAGE_SHIFT));
	}

	find_ramdisk(end_of_phys_memory);

	/* Reserve the kernel text/data/bss. */
	size = (start_pfn << PAGE_SHIFT) - phys_base;
	memblock_reserve(phys_base, size);
	memblock_add(phys_base, size);

	size = memblock_phys_mem_size() - memblock_reserved_size();
	*pages_avail = (size >> PAGE_SHIFT) - high_pages;

	/* Only allow low memory to be allocated via memblock allocation */
	memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);

	return max_pfn;
}

/*
 * paging_init() sets up the page tables: We call the MMU specific
 * init routine based upon the Sun model type on the Sparc.
 *
 */
void __init paging_init(void)
{
	srmmu_paging_init();
	prom_build_devicetree();
	of_fill_in_cpu_data();
	device_scan();
}
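
/* Mark each 1MB chunk of the physical banks as valid in
 * sparc_valid_addr_bitmap.
 */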
static void __init taint_real_pages(void)
{
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++) {
		unsigned long start, end;

		start = sp_banks[i].base_addr;
		end = start + sp_banks[i].num_bytes;

		while (start < end) {
			set_bit(start >> 20, sparc_valid_addr_bitmap);
			start += PAGE_SIZE;
		}
	}
}
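
/* Hand every page frame in [start_pfn, end_pfn) to the page allocator
 * as highmem.
 */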
static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long tmp;

#ifdef CONFIG_DEBUG_HIGHMEM
	printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
#endif

	for (tmp = start_pfn; tmp < end_pfn; tmp++)
		free_highmem_page(pfn_to_page(tmp));
}
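
/* Late memory init: sanity-check the pkmap/fixmap layout, allocate the
 * valid-address bitmap, release boot memory to the page allocator and
 * free the highmem banks.
 */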
void __init mem_init(void)
{
	int i;

	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		prom_printf("BUG: fixmap and pkmap areas overlap\n");
		prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n",
			    PKMAP_BASE,
			    (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
			    FIXADDR_START);
		prom_printf("Please mail sparclinux@vger.kernel.org.\n");
		prom_halt();
	}

	/* Saves us work later. */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
	i += 1;

	sparc_valid_addr_bitmap = (unsigned long *)
		memblock_alloc(i << 2, SMP_CACHE_BYTES);

	if (sparc_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc_valid_addr_bitmap, 0, i << 2);

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(max_low_pfn << PAGE_SHIFT);
	memblock_free_all();

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;

		if (end_pfn <= highstart_pfn)
			continue;

		if (start_pfn < highstart_pfn)
			start_pfn = highstart_pfn;

		map_high_region(start_pfn, end_pfn);
	}
}
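
/* Flush the cache lines covering the page's kernel mapping back to RAM. */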
void sparc_flush_page_to_ram(struct page *page)
{
	unsigned long vaddr = (unsigned long)page_address(page);

	__flush_page_to_ram(vaddr);
}
EXPORT_SYMBOL(sparc_flush_page_to_ram);

void sparc_flush_folio_to_ram(struct folio *folio)
{
	unsigned long vaddr = (unsigned long)folio_address(folio);
	unsigned int i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		__flush_page_to_ram(vaddr + i * PAGE_SIZE);
}
EXPORT_SYMBOL(sparc_flush_folio_to_ram);
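
/* Translate VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations into the
 * page protections used for user mappings; private writable mappings
 * get copy-on-write protections.
 */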
314 static const pgprot_t protection_map
[16] = {
315 [VM_NONE
] = PAGE_NONE
,
316 [VM_READ
] = PAGE_READONLY
,
317 [VM_WRITE
] = PAGE_COPY
,
318 [VM_WRITE
| VM_READ
] = PAGE_COPY
,
319 [VM_EXEC
] = PAGE_READONLY
,
320 [VM_EXEC
| VM_READ
] = PAGE_READONLY
,
321 [VM_EXEC
| VM_WRITE
] = PAGE_COPY
,
322 [VM_EXEC
| VM_WRITE
| VM_READ
] = PAGE_COPY
,
323 [VM_SHARED
] = PAGE_NONE
,
324 [VM_SHARED
| VM_READ
] = PAGE_READONLY
,
325 [VM_SHARED
| VM_WRITE
] = PAGE_SHARED
,
326 [VM_SHARED
| VM_WRITE
| VM_READ
] = PAGE_SHARED
,
327 [VM_SHARED
| VM_EXEC
] = PAGE_READONLY
,
328 [VM_SHARED
| VM_EXEC
| VM_READ
] = PAGE_READONLY
,
329 [VM_SHARED
| VM_EXEC
| VM_WRITE
] = PAGE_SHARED
,
330 [VM_SHARED
| VM_EXEC
| VM_WRITE
| VM_READ
] = PAGE_SHARED
332 DECLARE_VM_GET_PAGE_PROT