arch/s390/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007,2020
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)   (((x) & (a)) >> ilog2(a) << ilog2(b))
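
/*
 * For example, move_set_bit(x, 0x10, 0x02) evaluates to 0x02 if bit 4
 * is set in "x", and to 0 otherwise.
 */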

static inline unsigned long __pte_to_rste(pte_t pte)
{
        unsigned long rste;

        /*
         * Convert encoding           pte bits       pmd / pud bits
         *                            lIR.uswrdy.p   dy..R...I...wr
         * empty                      010.000000.0 -> 00..0...1...00
         * prot-none, clean, old      111.000000.1 -> 00..1...1...00
         * prot-none, clean, young    111.000001.1 -> 01..1...1...00
         * prot-none, dirty, old      111.000010.1 -> 10..1...1...00
         * prot-none, dirty, young    111.000011.1 -> 11..1...1...00
         * read-only, clean, old      111.000100.1 -> 00..1...1...01
         * read-only, clean, young    101.000101.1 -> 01..1...0...01
         * read-only, dirty, old      111.000110.1 -> 10..1...1...01
         * read-only, dirty, young    101.000111.1 -> 11..1...0...01
         * read-write, clean, old     111.001100.1 -> 00..1...1...11
         * read-write, clean, young   101.001101.1 -> 01..1...0...11
         * read-write, dirty, old     110.001110.1 -> 10..0...1...11
         * read-write, dirty, young   100.001111.1 -> 11..0...0...11
         * HW-bits: R read-only, I invalid
         * SW-bits: p present, y young, d dirty, r read, w write, s special,
         *          u unused, l large
         */
        if (pte_present(pte)) {
                rste = pte_val(pte) & PAGE_MASK;
                rste |= move_set_bit(pte_val(pte), _PAGE_READ,
                                     _SEGMENT_ENTRY_READ);
                rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
                                     _SEGMENT_ENTRY_WRITE);
                rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
                                     _SEGMENT_ENTRY_INVALID);
                rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
                                     _SEGMENT_ENTRY_PROTECT);
                rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
                                     _SEGMENT_ENTRY_DIRTY);
                rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
                                     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
                rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
                                     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
                rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
                                     _SEGMENT_ENTRY_NOEXEC);
        } else
                rste = _SEGMENT_ENTRY_EMPTY;
        return rste;
}
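
/*
 * Reverse of __pte_to_rste(): convert the hardware rste encoding back
 * into the software pte encoding.
 */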
static inline pte_t __rste_to_pte(unsigned long rste)
{
        int present;
        pte_t pte;

        if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                present = pud_present(__pud(rste));
        else
                present = pmd_present(__pmd(rste));

        /*
         * Convert encoding           pmd / pud bits    pte bits
         *                            dy..R...I...wr    lIR.uswrdy.p
         * empty                      00..0...1...00 -> 010.000000.0
         * prot-none, clean, old      00..1...1...00 -> 111.000000.1
         * prot-none, clean, young    01..1...1...00 -> 111.000001.1
         * prot-none, dirty, old      10..1...1...00 -> 111.000010.1
         * prot-none, dirty, young    11..1...1...00 -> 111.000011.1
         * read-only, clean, old      00..1...1...01 -> 111.000100.1
         * read-only, clean, young    01..1...0...01 -> 101.000101.1
         * read-only, dirty, old      10..1...1...01 -> 111.000110.1
         * read-only, dirty, young    11..1...0...01 -> 101.000111.1
         * read-write, clean, old     00..1...1...11 -> 111.001100.1
         * read-write, clean, young   01..1...0...11 -> 101.001101.1
         * read-write, dirty, old     10..0...1...11 -> 110.001110.1
         * read-write, dirty, young   11..0...0...11 -> 100.001111.1
         * HW-bits: R read-only, I invalid
         * SW-bits: p present, y young, d dirty, r read, w write, s special,
         *          u unused, l large
         */
        if (present) {
                pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
                pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
                                             _PAGE_READ);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
                                             _PAGE_WRITE);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
                                             _PAGE_INVALID);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
                                             _PAGE_PROTECT);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
                                             _PAGE_DIRTY);
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
                                             _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
                                             _PAGE_SOFT_DIRTY);
#endif
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
                                             _PAGE_NOEXEC);
        } else
                pte_val(pte) = _PAGE_INVALID;
        return pte;
}
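
/*
 * Initialize the storage keys of the huge page mapped by @rste, if the
 * mm makes use of storage keys. PG_arch_1 ensures the range is only
 * initialized once per page.
 */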
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
        struct page *page;
        unsigned long size, paddr;

        if (!mm_uses_skeys(mm) ||
            rste & _SEGMENT_ENTRY_INVALID)
                return;

        if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
                page = pud_page(__pud(rste));
                size = PUD_SIZE;
                paddr = rste & PUD_MASK;
        } else {
                page = pmd_page(__pmd(rste));
                size = PMD_SIZE;
                paddr = rste & PMD_MASK;
        }

        if (!test_and_set_bit(PG_arch_1, &page->flags))
                __storage_key_init_range(paddr, paddr + size - 1);
}
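
/*
 * Install a huge pte: convert the pte to rste format, drop the NOEXEC
 * bit on machines without the NX facility, and mark the entry as a
 * large segment (1 MB) or region third (2 GB) table entry.
 */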
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        unsigned long rste;

        rste = __pte_to_rste(pte);
        if (!MACHINE_HAS_NX)
                rste &= ~_SEGMENT_ENTRY_NOEXEC;

        /* Set correct table type for 2G hugepages */
        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
        else
                rste |= _SEGMENT_ENTRY_LARGE;
        clear_huge_pte_skeys(mm, rste);
        pte_val(*ptep) = rste;
}
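
/* Read a huge pte, converting the rste encoding back to pte format. */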
pte_t huge_ptep_get(pte_t *ptep)
{
        return __rste_to_pte(pte_val(*ptep));
}
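
/*
 * Clear a huge pte by exchanging the entry with an empty one via the
 * direct (immediately flushing) pudp/pmdp exchange helpers, and return
 * the old pte.
 */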
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
{
        pte_t pte = huge_ptep_get(ptep);
        pmd_t *pmdp = (pmd_t *) ptep;
        pud_t *pudp = (pud_t *) ptep;

        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
        else
                pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
        return pte;
}
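
/*
 * Allocate the page table levels needed for a huge mapping at @addr:
 * the walk stops at the pud level for PUD_SIZE (2 GB) pages and at the
 * pmd level for PMD_SIZE (1 MB) pages.
 */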
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_alloc(mm, pgdp, addr);
        if (p4dp) {
                pudp = pud_alloc(mm, p4dp, addr);
                if (pudp) {
                        if (sz == PUD_SIZE)
                                return (pte_t *) pudp;
                        else if (sz == PMD_SIZE)
                                pmdp = pmd_alloc(mm, pudp, addr);
                }
        }
        return (pte_t *) pmdp;
}
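
/*
 * Walk the page table to the huge entry for @addr: return the pud for
 * large pud mappings, otherwise the pmd, or NULL if an upper level is
 * not present.
 */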
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        if (pgd_present(*pgdp)) {
                p4dp = p4d_offset(pgdp, addr);
                if (p4d_present(*p4dp)) {
                        pudp = pud_offset(p4dp, addr);
                        if (pud_present(*pudp)) {
                                if (pud_large(*pudp))
                                        return (pte_t *) pudp;
                                pmdp = pmd_offset(pudp, addr);
                        }
                }
        }
        return (pte_t *) pmdp;
}
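
/* pmd_huge()/pud_huge(): does the entry map a large (huge) page? */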
int pmd_huge(pmd_t pmd)
{
        return pmd_large(pmd);
}

int pud_huge(pud_t pud)
{
        return pud_large(pud);
}
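
/*
 * follow_page() helper for huge pud mappings. Taking a page reference
 * (FOLL_GET) is not supported, so return NULL in that case.
 */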
struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
                pud_t *pud, int flags)
{
        if (flags & FOLL_GET)
                return NULL;

        return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}
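
/*
 * Parse the "hugepagesz=" kernel command line option. 1 MB (PMD_SIZE)
 * pages require the EDAT1 facility, 2 GB (PUD_SIZE) pages the EDAT2
 * facility.
 */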
static __init int setup_hugepagesz(char *opt)
{
        unsigned long size;
        char *string = opt;

        size = memparse(opt, &opt);
        if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                hugetlb_bad_size();
                pr_err("hugepagesz= specifies an unsupported page size %s\n",
                        string);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
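
/*
 * Find a free, hugepage-aligned area, searching upwards from mmap_base.
 */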
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
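
/*
 * Find a free, hugepage-aligned area, searching downwards from
 * mmap_base and falling back to a bottom-up search if that fails.
 */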
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
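
/*
 * Arch hook for hugetlb mmap(): validate length and address hint,
 * search bottom-up or top-down to match the mm's layout, and upgrade
 * the page tables if the chosen area ends above the current asce_limit.
 */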
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                goto check_asce_limit;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        goto check_asce_limit;
        }

        if (mm->get_unmapped_area == arch_get_unmapped_area)
                addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
        if (addr & ~PAGE_MASK)
                return addr;

check_asce_limit:
        if (addr + len > current->mm->context.asce_limit &&
            addr + len <= TASK_SIZE) {
                rc = crst_table_upgrade(mm, addr + len);
                if (rc)
                        return (unsigned long) rc;
        }

        return addr;
}