arch/x86/kernel/ldt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 *
 * Lock order:
 *	context.ldt_usr_sem
 *	  mmap_sem
 *	    context.lock
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}
/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}
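/*
 * Note: flush_ldt() is invoked via IPI on every CPU in mm_cpumask(mm).
 * The loaded_mm check above makes stray invocations harmless: a CPU whose
 * cpumask bit is stale (the mm is no longer loaded there) simply returns
 * without touching the LDT.
 */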
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}
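/*
 * Size note: LDT_ENTRIES is 8192 and LDT_ENTRY_SIZE is 8 bytes, so the
 * allocation above is at most 64 KiB; the vzalloc() branch covers every
 * LDT that spans more than a single page.
 */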
#ifdef CONFIG_PAGE_TABLE_ISOLATION

static void do_sanity_check(struct mm_struct *mm,
			    bool had_kernel_mapping,
			    bool had_user_mapping)
{
	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_kernel_mapping);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!had_user_mapping);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_kernel_mapping);
		if (static_cpu_has(X86_FEATURE_PTI))
			WARN_ON(had_user_mapping);
	}
}
#ifdef CONFIG_X86_PAE

static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
	p4d_t *p4d;
	pud_t *pud;

	if (pgd->pgd == 0)
		return NULL;

	p4d = p4d_offset(pgd, va);
	if (p4d_none(*p4d))
		return NULL;

	pud = pud_offset(p4d, va);
	if (pud_none(*pud))
		return NULL;

	return pmd_offset(pud, va);
}

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);

	if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pmd(u_pmd, *k_pmd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	bool had_kernel, had_user;
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);

	had_kernel = (k_pmd->pmd != 0);
	had_user   = (u_pmd->pmd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}
#else /* !CONFIG_X86_PAE */

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

	if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
	bool had_kernel = (pgd->pgd != 0);
	bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#endif /* CONFIG_X86_PAE */
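/*
 * On PAE the user/kernel propagation above happens at the pmd level rather
 * than the pgd level: each of the four top-level entries spans 1 GiB, far
 * too coarse to alias just the LDT range, so the walk descends to the pmd
 * covering LDT_BASE_ADDR before copying. The non-PAE variant can simply
 * copy the pgd entry.
 */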
/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 *
 * There is no corresponding unmap function. Even if the LDT is freed, we
 * leave the PTEs around until the slot is reused or the mm is destroyed.
 * This is harmless: the LDT is always in ordinary memory, and no one will
 * access the freed slot.
 *
 * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
 * it useful, and the flush would slow down modify_ldt().
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	unsigned long va;
	bool is_vmalloc;
	spinlock_t *ptl;
	pgd_t *pgd;
	int i;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/* Check if the current mappings are sane */
	sanity_check_ldt_mapping(mm);

	/*
	 * Did we already have the top level entry allocated? We can't
	 * use pgd_none() for this because it doesn't do anything on
	 * 4-level page table kernels.
	 */
	pgd = pgd_offset(mm, LDT_BASE_ADDR);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pgprot_t pte_prot;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy to find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
		/* Filter out unsupported __PAGE_KERNEL* bits: */
		pgprot_val(pte_prot) &= __supported_pte_mask;
		pte = pfn_pte(pfn, pte_prot);
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	/* Propagate LDT mapping to the user page-table */
	map_ldt_struct_to_user(mm);

	va = (unsigned long)ldt_slot_va(slot);
	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);

	ldt->slot = slot;
	return 0;
}
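/*
 * The alias created above is what makes the LDT usable under PTI: the CPU
 * dereferences LDT descriptors during segment loads executed in user mode,
 * i.e. while the user page-tables are active, so the descriptors must be
 * visible (read-only) in those tables as well.
 */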
#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	return 0;
}

#endif /* CONFIG_PAGE_TABLE_ISOLATION */
static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = LDT_END_ADDR;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}
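/*
 * This tears down the page-table pages backing the PTI alias range
 * (LDT_BASE_ADDR..LDT_END_ADDR) when the mm goes away; it is the
 * counterpart to the "no unmap function" policy described above
 * map_ldt_struct().
 */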
/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}
static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}
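/*
 * The final 'true' argument to on_each_cpu_mask() above makes the call
 * synchronous: install_ldt() does not return until every targeted CPU
 * has run flush_ldt(), so the new LDT is in use everywhere afterwards
 * and the old one can safely be freed.
 */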
static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}
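/*
 * vfree_atomic() rather than plain vfree() keeps the multi-page branch
 * safe if this is ever reached from a context that must not sleep:
 * vfree() may sleep, while vfree_atomic() defers the actual free.
 */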
/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}
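/*
 * Fork always maps into slot 0: the child mm cannot have an LDT installed
 * yet, so there is no old slot to avoid (contrast with the slot flip in
 * write_ldt() below).
 */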
/*
 * No need to lock the MM as we are the last user.
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}
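/*
 * There is no materialized "default LDT": the function just caps the
 * request at a plausible size (five descriptors on 32-bit, 128 bytes on
 * 64-bit) and zero-fills the user buffer via clear_user() above.
 */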
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt        = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This can only fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}
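/*
 * Illustrative userspace sketch (an assumption-laden example, not part of
 * this file): modify_ldt has no glibc wrapper, so callers go through
 * syscall(2) with struct user_desc from <asm/ldt.h>. Installing a 32-bit
 * data segment into entry 0 could look roughly like this, where 'buf' is
 * a hypothetical user buffer:
 *
 *	struct user_desc d = {
 *		.entry_number    = 0,
 *		.base_addr       = (unsigned int)(uintptr_t)buf,
 *		.limit           = 0xfff,
 *		.seg_32bit       = 1,
 *		.contents        = 0,	// data segment
 *		.seg_not_present = 0,
 *		.useable         = 1,
 *	};
 *	syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));	// 0x11: write_ldt, new mode
 *
 * The entry is then referenced with an LDT selector, i.e. TI bit set and
 * RPL 3: (entry_number << 3) | 4 | 3.
 */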