/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);

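/*
 * Allocate and reserve a block of 2^order zeroed pages and set up
 * zero_page_mask for it.  Newer machines (z10 and later) get several
 * zero pages instead of one, presumably so that read-only zero mappings
 * are spread over more than one cache line set; ZERO_PAGE() in
 * <asm/pgtable.h> uses the mask to pick one of them.  The return value
 * is the number of pages the caller must subtract from totalram_pages.
 */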
static unsigned long setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	unsigned long size;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	default:
		order = 2;
		break;
	}

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long) * 2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	atomic_set(&init_mm.context.attach_count, 1);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
	fault_init();
}

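/*
 * mem_init() hands all bootmem pages over to the buddy allocator,
 * carves out the reserved zero pages and prints the memory layout
 * banner.
 */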
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long) &_stext,
	       PFN_ALIGN((unsigned long) &_eshared) - 1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
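/*
 * With CONFIG_DEBUG_PAGEALLOC freed pages are removed from the kernel
 * 1:1 mapping so that stray accesses fault immediately: enable == 0
 * invalidates the PTE via IPTE and marks it empty, enable != 0
 * re-creates a read/write mapping for the page.
 */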
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		if (!enable) {
			__ptep_ipte(address, pte);
			pte_val(*pte) = _PAGE_TYPE_EMPTY;
			continue;
		}
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
#endif

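/*
 * Give a range of init memory back to the page allocator: clear the
 * reserved bit, reset the page count, poison the contents with
 * POISON_FREE_INITMEM and free each page, accounting it in
 * totalram_pages.
 */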
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (begin >= end)
		return;
	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
		       PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long) &__init_begin,
			(unsigned long) &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
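/*
 * Memory hotplug entry point: add the new range to the kernel 1:1
 * mapping via vmem_add_mapping() and register its pages with the
 * ZONE_MOVABLE zone; the mapping is torn down again if __add_pages()
 * fails.
 */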
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int rc;

	pgdat = NODE_DATA(nid);
	zone = pgdat->node_zones + ZONE_MOVABLE;
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */