/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
24 /* context.lock is held for us, so we don't need any locking. */
25 static void flush_ldt(void *current_mm
)
29 if (current
->active_mm
!= current_mm
)
32 pc
= ¤t
->active_mm
->context
;
33 set_ldt(pc
->ldt
->entries
, pc
->ldt
->size
);
36 /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
37 static struct ldt_struct
*alloc_ldt_struct(int size
)
39 struct ldt_struct
*new_ldt
;
42 if (size
> LDT_ENTRIES
)
45 new_ldt
= kmalloc(sizeof(struct ldt_struct
), GFP_KERNEL
);
49 BUILD_BUG_ON(LDT_ENTRY_SIZE
!= sizeof(struct desc_struct
));
50 alloc_size
= size
* LDT_ENTRY_SIZE
;
53 * Xen is very picky: it requires a page-aligned LDT that has no
54 * trailing nonzero bytes in any page that contains LDT descriptors.
55 * Keep it simple: zero the whole allocation and never allocate less
58 if (alloc_size
> PAGE_SIZE
)
59 new_ldt
->entries
= vzalloc(alloc_size
);
61 new_ldt
->entries
= (void *)get_zeroed_page(GFP_KERNEL
);
63 if (!new_ldt
->entries
) {
72 /* After calling this, the LDT is immutable. */
73 static void finalize_ldt_struct(struct ldt_struct
*ldt
)
75 paravirt_alloc_ldt(ldt
->entries
, ldt
->size
);
78 /* context.lock is held */
79 static void install_ldt(struct mm_struct
*current_mm
,
80 struct ldt_struct
*ldt
)
82 /* Synchronizes with lockless_dereference in load_mm_ldt. */
83 smp_store_release(¤t_mm
->context
.ldt
, ldt
);
85 /* Activate the LDT for all CPUs using current_mm. */
86 on_each_cpu_mask(mm_cpumask(current_mm
), flush_ldt
, current_mm
, true);
89 static void free_ldt_struct(struct ldt_struct
*ldt
)
94 paravirt_free_ldt(ldt
->entries
, ldt
->size
);
95 if (ldt
->size
* LDT_ENTRY_SIZE
> PAGE_SIZE
)
98 free_page((unsigned long)ldt
->entries
);
103 * we do not have to muck with descriptors here, that is
104 * done in switch_mm() as needed.
106 int init_new_context(struct task_struct
*tsk
, struct mm_struct
*mm
)
108 struct ldt_struct
*new_ldt
;
109 struct mm_struct
*old_mm
;
112 mutex_init(&mm
->context
.lock
);
113 old_mm
= current
->mm
;
115 mm
->context
.ldt
= NULL
;
119 mutex_lock(&old_mm
->context
.lock
);
120 if (!old_mm
->context
.ldt
) {
121 mm
->context
.ldt
= NULL
;
125 new_ldt
= alloc_ldt_struct(old_mm
->context
.ldt
->size
);
131 memcpy(new_ldt
->entries
, old_mm
->context
.ldt
->entries
,
132 new_ldt
->size
* LDT_ENTRY_SIZE
);
133 finalize_ldt_struct(new_ldt
);
135 mm
->context
.ldt
= new_ldt
;
138 mutex_unlock(&old_mm
->context
.lock
);
143 * No need to lock the MM as we are the last user
145 * 64bit: Don't touch the LDT register - we're already in the next thread.
147 void destroy_context(struct mm_struct
*mm
)
149 free_ldt_struct(mm
->context
.ldt
);
150 mm
->context
.ldt
= NULL
;
153 static int read_ldt(void __user
*ptr
, unsigned long bytecount
)
157 struct mm_struct
*mm
= current
->mm
;
159 mutex_lock(&mm
->context
.lock
);
161 if (!mm
->context
.ldt
) {
166 if (bytecount
> LDT_ENTRY_SIZE
* LDT_ENTRIES
)
167 bytecount
= LDT_ENTRY_SIZE
* LDT_ENTRIES
;
169 size
= mm
->context
.ldt
->size
* LDT_ENTRY_SIZE
;
170 if (size
> bytecount
)
173 if (copy_to_user(ptr
, mm
->context
.ldt
->entries
, size
)) {
178 if (size
!= bytecount
) {
179 /* Zero-fill the rest and pretend we read bytecount bytes. */
180 if (clear_user(ptr
+ size
, bytecount
- size
)) {
188 mutex_unlock(&mm
->context
.lock
);
192 static int read_default_ldt(void __user
*ptr
, unsigned long bytecount
)
194 /* CHECKME: Can we use _one_ random number ? */
196 unsigned long size
= 5 * sizeof(struct desc_struct
);
198 unsigned long size
= 128;
200 if (bytecount
> size
)
202 if (clear_user(ptr
, bytecount
))
207 static int write_ldt(void __user
*ptr
, unsigned long bytecount
, int oldmode
)
209 struct mm_struct
*mm
= current
->mm
;
210 struct desc_struct ldt
;
212 struct user_desc ldt_info
;
213 int oldsize
, newsize
;
214 struct ldt_struct
*new_ldt
, *old_ldt
;
217 if (bytecount
!= sizeof(ldt_info
))
220 if (copy_from_user(&ldt_info
, ptr
, sizeof(ldt_info
)))
224 if (ldt_info
.entry_number
>= LDT_ENTRIES
)
226 if (ldt_info
.contents
== 3) {
229 if (ldt_info
.seg_not_present
== 0)
233 if ((oldmode
&& !ldt_info
.base_addr
&& !ldt_info
.limit
) ||
234 LDT_empty(&ldt_info
)) {
235 /* The user wants to clear the entry. */
236 memset(&ldt
, 0, sizeof(ldt
));
238 if (!IS_ENABLED(CONFIG_X86_16BIT
) && !ldt_info
.seg_32bit
) {
243 fill_ldt(&ldt
, &ldt_info
);
248 mutex_lock(&mm
->context
.lock
);
250 old_ldt
= mm
->context
.ldt
;
251 oldsize
= old_ldt
? old_ldt
->size
: 0;
252 newsize
= max((int)(ldt_info
.entry_number
+ 1), oldsize
);
255 new_ldt
= alloc_ldt_struct(newsize
);
260 memcpy(new_ldt
->entries
, old_ldt
->entries
, oldsize
* LDT_ENTRY_SIZE
);
261 new_ldt
->entries
[ldt_info
.entry_number
] = ldt
;
262 finalize_ldt_struct(new_ldt
);
264 install_ldt(mm
, new_ldt
);
265 free_ldt_struct(old_ldt
);
269 mutex_unlock(&mm
->context
.lock
);
274 asmlinkage
int sys_modify_ldt(int func
, void __user
*ptr
,
275 unsigned long bytecount
)
281 ret
= read_ldt(ptr
, bytecount
);
284 ret
= write_ldt(ptr
, bytecount
, 1);
287 ret
= read_default_ldt(ptr
, bytecount
);
290 ret
= write_ldt(ptr
, bytecount
, 0);