// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;
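
/*
 * Runs once at boot: zeroes the zero-page, maps and releases the area
 * between the kernel brk and uml_reserved, hands all boot (memblock)
 * memory over to the buddy allocator, and finally marks kmalloc as
 * usable.
 */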
void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_low_pfn = totalram_pages();
	max_pfn = max_low_pfn;
	mem_init_print_info(NULL);
	kmalloc_ok = 1;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		if (pte != pte_offset_kernel(pmd, 0))
			BUG();
	}
}
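
/*
 * As above, but one level up: allocate a pmd page and hook it into the
 * given pud entry. This only allocates anything with three-level page
 * tables; with two levels the pmd is folded away and this compiles to
 * an empty function.
 */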
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#endif
}
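
/*
 * Pre-allocate every intermediate page table needed to cover
 * [start, end), so that later callers (e.g. set_fixmap()) can install
 * leaf ptes without having to allocate memory themselves.
 */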
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}
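
/*
 * With CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA, copy the host's vsyscall
 * area into freshly allocated kernel memory and map that copy read-only
 * at the same fixed user addresses, one page at a time.
 */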
static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pgd = swapper_pg_dir + pgd_index(vaddr);
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		pmd = pmd_offset(pud, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}
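
/*
 * Allocate the zero-page, tell the buddy allocator how large ZONE_NORMAL
 * is, and build the page-table skeleton for the fixmap area (plus the
 * optional user vsyscall copy).
 */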
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], vaddr;
	int i;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
		zones_size[i] = 0;

	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
		(uml_physmem >> PAGE_SHIFT);
	free_area_init(zones_size);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}
/* Allocate and free page tables. */
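
/*
 * A fresh pgd starts with a cleared user half; the kernel half is copied
 * from swapper_pg_dir, so every address space shares the same kernel
 * mappings.
 */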
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

	if (pmd)
		memset(pmd, 0, PAGE_SIZE);

	return pmd;
}
#endif
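
/*
 * Thin wrapper so code on the userspace (os-Linux) side of UML, which
 * cannot include kernel headers, can still allocate kernel memory.
 */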
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}