// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/pgtable_areas.h>

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

static inline void *ldt_slot_va(int slot)
{
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}

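/*
 * Load the given mm's LDT into LDTR. With page table isolation enabled,
 * the CPU is pointed at the read-only alias slot instead of ldt->entries.
 */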
void load_mm_ldt(struct mm_struct *mm)
{
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active. The LDT will not be freed until
	 * after the IPI is handled by all such CPUs. This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
}

void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT. Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed. There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode. Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);

	DEBUG_LOCKS_WARN_ON(preemptible());
}

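/*
 * Segment registers cache the descriptor they were loaded from; reload any
 * selector that refers to the LDT so the cached copy picks up the new entries.
 */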
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION

static void do_sanity_check(struct mm_struct *mm,
			    bool had_kernel_mapping,
			    bool had_user_mapping)
{
	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!had_user_mapping);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(had_user_mapping);
	}
}

#ifdef CONFIG_X86_PAE

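/*
 * With PAE the p4d and pud levels are folded, so this simply walks down to
 * the pmd that covers @va, or returns NULL if an intermediate level is absent.
 */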
static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
	p4d_t *p4d;
	pud_t *pud;

	if (pgd->pgd == 0)
		return NULL;

	p4d = p4d_offset(pgd, va);
	if (p4d_none(*p4d))
		return NULL;

	pud = pud_offset(p4d, va);
	if (pud_none(*pud))
		return NULL;

	return pmd_offset(pud, va);
}

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pmd(u_pmd, *k_pmd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	bool had_kernel, had_user;
	pmd_t *k_pmd, *u_pmd;

	k_pmd      = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd      = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
	had_kernel = (k_pmd->pmd != 0);
	had_user   = (u_pmd->pmd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#else /* !CONFIG_X86_PAE */

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
	bool had_kernel = (pgd->pgd != 0);
	bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#endif /* CONFIG_X86_PAE */

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	unsigned long va;
	bool is_vmalloc;
	spinlock_t *ptl;
	int i, nr_pages;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/* Check if the current mappings are sane */
	sanity_check_ldt_mapping(mm);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pgprot_t pte_prot;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy to find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
		/* Filter out unsupported __PAGE_KERNEL* bits: */
		pgprot_val(pte_prot) &= __supported_pte_mask;
		pte = pfn_pte(pfn, pte_prot);
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	/* Propagate LDT mapping to the user page-table */
	map_ldt_struct_to_user(mm);

	ldt->slot = slot;
	return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
	unsigned long va;
	int i, nr_pages;

	if (!ldt)
		return;

	/* LDT map/unmap is only required for PTI */
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		spinlock_t *ptl;
		pte_t *ptep;

		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
		ptep = get_locked_pte(mm, va, &ptl);
		pte_clear(mm, va, ptep);
		pte_unmap_unlock(ptep, ptl);
	}

	va = (unsigned long)ldt_slot_va(ldt->slot);
	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
}

#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = LDT_END_ADDR;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

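/* Free an ldt_struct and its entry array, undoing alloc_ldt_struct(). */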
static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

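/*
 * modify_ldt(func = 2): the default LDT is empty, so reading it just
 * zero-fills the caller's buffer.
 */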
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

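/*
 * modify_ldt(func = 1 or 0x11): validate the user_desc, build a new
 * ldt_struct containing the updated entry, and atomically install it.
 */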
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt        = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This only can fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	unmap_ldt_struct(mm, old_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}
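
/*
 * Illustrative userspace sketch of the ABI handled above (not part of this
 * file; 'base', 'buf', and the chosen field values are arbitrary examples):
 *
 *	#include <asm/ldt.h>		// struct user_desc
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct user_desc desc = {
 *		.entry_number   = 0,
 *		.base_addr      = (unsigned int)(unsigned long)base,
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *		.useable        = 1,
 *	};
 *
 *	// func 0x11 installs one entry via write_ldt(..., oldmode = 0);
 *	// bytecount must equal sizeof(struct user_desc) or -EINVAL is returned.
 *	syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
 *
 *	// func 0 reads the LDT back via read_ldt(), zero-filling the tail.
 *	syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
 */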