 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
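
/*
 * swapper_pg_dir is the initial page directory used by the kernel
 * (init_mm). empty_zero_page points at a block of zeroed pages backing
 * the user-visible zero page; zero_page_mask selects one page of that
 * block so that readers of the zero page are spread across cache
 * colors (see setup_zero_pages() below).
 */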
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
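
/*
 * Allocate the block of pages backing the zero page. Newer machine
 * models get a larger block (higher allocation order) so that
 * concurrent users of the zero page hit different cache colors;
 * zero_page_mask is used to pick a page within that block.
 */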
static unsigned long __init setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	unsigned long size;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	default:
		order = 2;
		break;
	}

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}

/*
 * paging_init() sets up the page tables
 */
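
/*
 * The kernel address space control element (ASCE) is built from
 * init_mm.pgd: a region-second table if the vmalloc area ends above
 * 2^42, otherwise a region-third table (a segment table on 31 bit).
 * The ASCE is loaded into control registers 1, 7 and 13 so that the
 * primary, secondary and home address spaces all use the kernel
 * page tables.
 */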
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
#ifdef CONFIG_64BIT
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
#else
	asce_bits = _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	atomic_set(&init_mm.context.attach_count, 1);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
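
/*
 * mem_init() hands all bootmem-managed memory over to the buddy
 * allocator, reserves the zero pages and reports the final memory
 * layout on the console.
 */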
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}
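
/*
 * free_init_pages() returns a range of init memory to the page
 * allocator: each page is unreserved, poisoned with
 * POISON_FREE_INITMEM, freed, and accounted in totalram_pages.
 */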
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (begin >= end)
		return;
	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
		       PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}
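
/* Called once boot is complete to release the __init text and data. */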
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)&__init_begin,
			(unsigned long)&__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
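
/*
 * Memory hotplug: arch_add_memory() first creates the identity mapping
 * for the new range with vmem_add_mapping(), then walks the zones.
 * Pages that fall within an existing zone's span are added to that
 * zone; anything left over goes to ZONE_MOVABLE. If adding pages
 * fails, the identity mapping is removed again.
 */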
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	struct zone *zone;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	for_each_zone(zone) {
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Add range within existing zone limits */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		rc = __add_pages(nid, zone, start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */