// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable_areas.h>

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

static inline void *ldt_slot_va(int slot)
{
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
}

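/*
 * Worked example (illustrative): with LDT_ENTRIES == 8192 and
 * LDT_ENTRY_SIZE == 8, LDT_SLOT_STRIDE is 64 KiB, so slot 0 aliases the
 * LDT at LDT_BASE_ADDR and slot 1 at LDT_BASE_ADDR + 0x10000.
 */
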
void load_mm_ldt(struct mm_struct *mm)
{
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active. The LDT will not be freed until
	 * after the IPI is handled by all such CPUs. This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
}

void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT. Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed. There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode. Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);

	DEBUG_LOCKS_WARN_ON(preemptible());
}

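/*
 * Illustrative aside (not from the original source): '|' lets both
 * pointer loads feed a single OR and one conditional branch, whereas
 * '||' must preserve short-circuit evaluation and may compile to two
 * separate tests. A minimal sketch of the same pattern:
 *
 *	if ((unsigned long)p | (unsigned long)q)	// one combined test
 *		do_something();
 */
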
static void refresh_ldt_segments(void)
{
#ifdef CONFIG_X86_64
	unsigned short sel;

	/*
	 * Make sure that the cached DS and ES descriptors match the updated
	 * LDT.
	 */
	savesegment(ds, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(ds, sel);

	savesegment(es, sel);
	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
		loadsegment(es, sel);
#endif
}

/* context.lock is held by the task which issued the smp function call */
static void flush_ldt(void *__mm)
{
	struct mm_struct *mm = __mm;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
		return;

	load_mm_ldt(mm);

	refresh_ldt_segments();
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
	struct ldt_struct *new_ldt;
	unsigned int alloc_size;

	if (num_entries > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = num_entries * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	/* The new LDT isn't aliased for PTI yet. */
	new_ldt->slot = -1;

	new_ldt->nr_entries = num_entries;
	return new_ldt;
}

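/*
 * Sizing note (illustrative, assuming 4 KiB pages): up to 512 entries
 * (512 * 8 == PAGE_SIZE) the table fits in one zeroed page; anything
 * larger takes the vzalloc() path.
 */
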
#ifdef CONFIG_PAGE_TABLE_ISOLATION

static void do_sanity_check(struct mm_struct *mm,
			    bool had_kernel_mapping,
			    bool had_user_mapping)
{
	if (mm->context.ldt) {
		/*
		 * We already had an LDT. The top-level entry should already
		 * have been allocated and synchronized with the usermode
		 * tables.
		 */
		WARN_ON(!had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(!had_user_mapping);
	} else {
		/*
		 * This is the first time we're mapping an LDT for this process.
		 * Sync the pgd to the usermode tables.
		 */
		WARN_ON(had_kernel_mapping);
		if (boot_cpu_has(X86_FEATURE_PTI))
			WARN_ON(had_user_mapping);
	}
}

#ifdef CONFIG_X86_PAE

static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)
{
	p4d_t *p4d;
	pud_t *pud;

	if (pgd->pgd == 0)
		return NULL;

	p4d = p4d_offset(pgd, va);
	if (p4d_none(*p4d))
		return NULL;

	pud = pud_offset(p4d, va);
	if (pud_none(*pud))
		return NULL;

	return pmd_offset(pud, va);
}

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pmd(u_pmd, *k_pmd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	bool had_kernel, had_user;
	pmd_t *k_pmd, *u_pmd;

	k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
	u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
	had_kernel = (k_pmd->pmd != 0);
	had_user   = (u_pmd->pmd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#else /* !CONFIG_X86_PAE */

static void map_ldt_struct_to_user(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

	if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
		set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}

static void sanity_check_ldt_mapping(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
	bool had_kernel = (pgd->pgd != 0);
	bool had_user   = (kernel_to_user_pgdp(pgd)->pgd != 0);

	do_sanity_check(mm, had_kernel, had_user);
}

#endif /* CONFIG_X86_PAE */

/*
 * If PTI is enabled, this maps the LDT into the kernelmode and
 * usermode tables for the given mm.
 */
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	unsigned long va;
	bool is_vmalloc;
	spinlock_t *ptl;
	int i, nr_pages;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return 0;

	/*
	 * Any given ldt_struct should have map_ldt_struct() called at most
	 * once.
	 */
	WARN_ON(ldt->slot != -1);

	/* Check if the current mappings are sane */
	sanity_check_ldt_mapping(mm);

	is_vmalloc = is_vmalloc_addr(ldt->entries);

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		const void *src = (char *)ldt->entries + offset;
		unsigned long pfn;
		pgprot_t pte_prot;
		pte_t pte, *ptep;

		va = (unsigned long)ldt_slot_va(slot) + offset;
		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
			page_to_pfn(virt_to_page(src));
		/*
		 * Treat the PTI LDT range as a *userspace* range.
		 * get_locked_pte() will allocate all needed pagetables
		 * and account for them in this mm.
		 */
		ptep = get_locked_pte(mm, va, &ptl);
		if (!ptep)
			return -ENOMEM;
		/*
		 * Map it RO so the easy-to-find address is not a primary
		 * target via some kernel interface which misses a
		 * permission check.
		 */
		pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
		/* Filter out unsupported __PAGE_KERNEL* bits: */
		pgprot_val(pte_prot) &= __supported_pte_mask;
		pte = pfn_pte(pfn, pte_prot);
		set_pte_at(mm, va, ptep, pte);
		pte_unmap_unlock(ptep, ptl);
	}

	/* Propagate LDT mapping to the user page-table */
	map_ldt_struct_to_user(mm);

	ldt->slot = slot;
	return 0;
}

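/*
 * Sizing example (illustrative, assuming 4 KiB pages): a maximal
 * 8192-entry LDT spans 64 KiB, i.e. nr_pages == 16 PTEs per slot, while
 * a single-entry LDT still consumes one page:
 *
 *	DIV_ROUND_UP(1 * 8, 4096)    == 1
 *	DIV_ROUND_UP(8192 * 8, 4096) == 16
 */
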
static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
	unsigned long va;
	int i, nr_pages;

	if (!ldt)
		return;

	/* LDT map/unmap is only required for PTI */
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);

	for (i = 0; i < nr_pages; i++) {
		unsigned long offset = i << PAGE_SHIFT;
		spinlock_t *ptl;
		pte_t *ptep;

		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
		ptep = get_locked_pte(mm, va, &ptl);
		pte_clear(mm, va, ptep);
		pte_unmap_unlock(ptep, ptl);
	}

	va = (unsigned long)ldt_slot_va(ldt->slot);
	flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
}

#else /* !CONFIG_PAGE_TABLE_ISOLATION */

static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
{
	return 0;
}

static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
{
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

static void free_ldt_pgtables(struct mm_struct *mm)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	struct mmu_gather tlb;
	unsigned long start = LDT_BASE_ADDR;
	unsigned long end = LDT_END_ADDR;

	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	tlb_gather_mmu(&tlb, mm, start, end);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb, start, end);
#endif
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}

static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
{
	mutex_lock(&mm->context.lock);

	/* Synchronizes with READ_ONCE in load_mm_ldt. */
	smp_store_release(&mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current's mm. */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);

	mutex_unlock(&mm->context.lock);
}

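/*
 * A minimal sketch of the publish/consume pairing above (illustrative
 * names): the release store orders all prior writes to the new
 * ldt_struct before the pointer becomes visible, and load_mm_ldt()'s
 * READ_ONCE() plus the address dependency makes the reader see a fully
 * initialized structure:
 *
 *	writer				reader
 *	------				------
 *	ldt->nr_entries = n;		ldt = READ_ONCE(mm->context.ldt);
 *	smp_store_release(		if (ldt)
 *	    &mm->context.ldt, ldt);		set_ldt(ldt->entries, ldt->nr_entries);
 */
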
static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree_atomic(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
 * the new task is not running, so nothing can be installed.
 */
int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	int retval = 0;

	if (!old_mm)
		return 0;

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt)
		goto out_unlock;

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	retval = map_ldt_struct(mm, new_ldt, 0);
	if (retval) {
		free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}
	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user.
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context_ldt(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

void ldt_arch_exit_mmap(struct mm_struct *mm)
{
	free_ldt_pgtables(mm);
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	struct mm_struct *mm = current->mm;
	unsigned long entries_size;
	int retval;

	down_read(&mm->context.ldt_usr_sem);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
	if (entries_size > bytecount)
		entries_size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (entries_size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	up_read(&mm->context.ldt_usr_sem);
	return retval;
}

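/*
 * Semantics example (illustrative): with a one-entry LDT installed and
 * bytecount == 32, the first 8 bytes returned are the copied descriptor,
 * the remaining 24 are zero-filled, and the call returns 32.
 */
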
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number ? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct ldt_struct *new_ldt, *old_ldt;
	unsigned int old_nr_entries, new_nr_entries;
	struct user_desc ldt_info;
	struct desc_struct ldt;
	int error;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	old_ldt        = mm->context.ldt;
	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(new_nr_entries);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);

	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	/*
	 * If we are using PTI, map the new LDT into the userspace pagetables.
	 * If there is already an LDT, use the other slot so that other CPUs
	 * will continue to use the old LDT until install_ldt() switches
	 * them over to the new LDT.
	 */
	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
	if (error) {
		/*
		 * This can only fail for the first LDT setup. If an LDT is
		 * already installed then the PTE page is already
		 * populated. Mop up a half-populated page table.
		 */
		if (!WARN_ON_ONCE(old_ldt))
			free_ldt_pgtables(mm);
		free_ldt_struct(new_ldt);
		goto out_unlock;
	}

	install_ldt(mm, new_ldt);
	unmap_ldt_struct(mm, old_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	up_write(&mm->context.ldt_usr_sem);
out:
	return error;
}

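/*
 * Slot alternation example (PTI, illustrative): the first installed LDT
 * is mapped at slot 0; a later write_ldt() maps its replacement at slot
 * !0 == 1, so CPUs still running on the old alias keep a valid mapping
 * until install_ldt() flips them over, after which the old slot is
 * unmapped and freed.
 */
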
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	/*
	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
	 * return type, but the ABI for sys_modify_ldt() expects
	 * 'int'. This cast gives us an int-sized value in %rax
	 * for the return code. The 'unsigned' is necessary so
	 * the compiler does not try to sign-extend the negative
	 * return codes into the high half of the register when
	 * taking the value from int->long.
	 */
	return (unsigned int)ret;
}

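/*
 * Userspace usage sketch (illustrative only, not part of this file):
 * installing one 32-bit data segment via subfunction 0x11 (write, new
 * mode), assuming <asm/ldt.h>, <sys/syscall.h> and syscall(2); "buf" is
 * a hypothetical buffer:
 *
 *	struct user_desc d = {
 *		.entry_number	= 0,
 *		.base_addr	= (unsigned int)(unsigned long)buf,
 *		.limit		= 0xfff,
 *		.seg_32bit	= 1,
 *		.useable	= 1,
 *	};
 *	if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)) != 0)
 *		perror("modify_ldt");
 */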