/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

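/*
 * UML runs as a normal host process, so the host's modify_ldt is
 * reached through the userspace syscall() wrapper rather than a
 * kernel-internal call.
 */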
static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
{
        return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

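/*
 * Write one descriptor into the LDT of a child address space: the
 * user_desc is staged in the child's stub data page via
 * syscall_stub_data(), then modify_ldt is executed inside the child
 * through run_syscall_stub().  A non-zero "done" marks the last
 * operation of a batch, causing the queued stub calls to be run.
 */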
static long write_ldt_entry(struct mm_id *mm_idp, int func,
                            struct user_desc *desc, void **addr, int done)
{
        long res;
        void *stub_addr;

        res = syscall_stub_data(mm_idp, (unsigned long *)desc,
                                (sizeof(*desc) + sizeof(long) - 1) &
                                    ~(sizeof(long) - 1),
                                addr, &stub_addr);
        if (!res) {
                unsigned long args[] = { func,
                                         (unsigned long)stub_addr,
                                         sizeof(*desc),
                                         0, 0, 0 };
                res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
                                       0, addr, done);
        }

        return res;
}

/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * for arch/i386/kernel/ldt.c
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */

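/*
 * Copy the shadow LDT out to userspace.  Up to LDT_DIRECT_ENTRIES the
 * table lives in an embedded array; beyond that it is kept in
 * separately allocated pages.  Bytes requested past the last valid
 * entry are zero-filled with clear_user().
 */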
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
        int i, err = 0;
        unsigned long size;
        uml_ldt_t *ldt = &current->mm->context.arch.ldt;

        if (!ldt->entry_count)
                goto out;
        if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
        err = bytecount;

        mutex_lock(&ldt->lock);
        if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
                size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
                if (size > bytecount)
                        size = bytecount;
                if (copy_to_user(ptr, ldt->u.entries, size))
                        err = -EFAULT;
                bytecount -= size;
                ptr += size;
        }
        else {
                for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
                     i++) {
                        size = PAGE_SIZE;
                        if (size > bytecount)
                                size = bytecount;
                        if (copy_to_user(ptr, ldt->u.pages[i], size)) {
                                err = -EFAULT;
                                break;
                        }
                        bytecount -= size;
                        ptr += size;
                }
        }
        mutex_unlock(&ldt->lock);

        if (bytecount == 0 || err == -EFAULT)
                goto out;

        if (clear_user(ptr, bytecount))
                err = -EFAULT;

out:
        return err;
}

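/*
 * There is no real default LDT under UML (no lcall7/lcall27 support),
 * so reads of it are satisfied with zeros of the usual host size.
 */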
static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
        int err;

        if (bytecount > 5*LDT_ENTRY_SIZE)
                bytecount = 5*LDT_ENTRY_SIZE;

        err = bytecount;

        /*
         * UML doesn't support lcall7 and lcall27.
         * So, we don't really have a default ldt, but emulate
         * an empty ldt of common host default ldt size.
         */
        if (clear_user(ptr, bytecount))
                err = -EFAULT;

        return err;
}

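/*
 * Validate a user_desc from userspace, install it in the host-side LDT
 * of this address space first, and then mirror it into the shadow
 * table, switching from the direct array to page-sized chunks once the
 * entry number grows past LDT_DIRECT_ENTRIES.  On allocation failure
 * the host entry is cleared again so host and shadow stay consistent.
 */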
static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
        uml_ldt_t *ldt = &current->mm->context.arch.ldt;
        struct mm_id * mm_idp = &current->mm->context.id;
        int i, err;
        struct user_desc ldt_info;
        struct ldt_entry entry0, *ldt_p;
        void *addr = NULL;

        err = -EINVAL;
        if (bytecount != sizeof(ldt_info))
                goto out;
        err = -EFAULT;
        if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
                goto out;

        err = -EINVAL;
        if (ldt_info.entry_number >= LDT_ENTRIES)
                goto out;
        if (ldt_info.contents == 3) {
                if (func == 1)
                        goto out;
                if (ldt_info.seg_not_present == 0)
                        goto out;
        }

        mutex_lock(&ldt->lock);

        err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
        if (err)
                goto out_unlock;

        if (ldt_info.entry_number >= ldt->entry_count &&
            ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
                for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
                     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
                     i++) {
                        if (i == 0)
                                memcpy(&entry0, ldt->u.entries,
                                       sizeof(entry0));
                        ldt->u.pages[i] = (struct ldt_entry *)
                                __get_free_page(GFP_KERNEL|__GFP_ZERO);
                        if (!ldt->u.pages[i]) {
                                err = -ENOMEM;
                                /* Undo the change in host */
                                memset(&ldt_info, 0, sizeof(ldt_info));
                                write_ldt_entry(mm_idp, 1, &ldt_info,
                                                &addr, 1);
                                goto out_unlock;
                        }
                        if (i == 0) {
                                memcpy(ldt->u.pages[0], &entry0,
                                       sizeof(entry0));
                                memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
                                       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
                        }
                        ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
                }
        }
        if (ldt->entry_count <= ldt_info.entry_number)
                ldt->entry_count = ldt_info.entry_number + 1;

        if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
                ldt_p = ldt->u.entries + ldt_info.entry_number;
        else
                ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
                        ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

        if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
            (func == 1 || LDT_empty(&ldt_info))) {
                ldt_p->a = 0;
                ldt_p->b = 0;
        }
        else {
                if (func == 1)
                        ldt_info.useable = 0;
                ldt_p->a = LDT_entry_a(&ldt_info);
                ldt_p->b = LDT_entry_b(&ldt_info);
        }
        err = 0;

out_unlock:
        mutex_unlock(&ldt->lock);
out:
        return err;
}

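/*
 * Dispatch on the modify_ldt func argument: 0 reads the LDT, 1 and
 * 0x11 write an entry (func 1 additionally forces useable = 0), and 2
 * reads the default LDT.
 */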
static long do_modify_ldt_skas(int func, void __user *ptr,
                               unsigned long bytecount)
{
        int ret = -ENOSYS;

        switch (func) {
        case 0:
                ret = read_ldt(ptr, bytecount);
                break;
        case 1:
        case 0x11:
                ret = write_ldt(ptr, bytecount, func);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        }
        return ret;
}

static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = {0, -1};
static short * host_ldt_entries = NULL;

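/*
 * Read the host's own LDT once and record the occupied slots in
 * host_ldt_entries as a -1 terminated list, so init_new_ldt() can
 * explicitly clear the entries a new address space inherits from the
 * host.
 */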
static void ldt_get_host_info(void)
{
        long ret;
        struct ldt_entry * ldt;
        short *tmp;
        int i, size, k, order;

        spin_lock(&host_ldt_lock);

        if (host_ldt_entries != NULL) {
                spin_unlock(&host_ldt_lock);
                return;
        }
        host_ldt_entries = dummy_list+1;

        spin_unlock(&host_ldt_lock);

        for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
                ;

        ldt = (struct ldt_entry *)
              __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if (ldt == NULL) {
                printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
                       "for host ldt\n");
                return;
        }

        ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
        if (ret < 0) {
                printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
                goto out_free;
        }
        if (ret == 0) {
                /* default_ldt is active, simply write an empty entry 0 */
                host_ldt_entries = dummy_list;
                goto out_free;
        }

        for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
                if (ldt[i].a != 0 || ldt[i].b != 0)
                        size++;
        }

        if (size < ARRAY_SIZE(dummy_list))
                host_ldt_entries = dummy_list;
        else {
                size = (size + 1) * sizeof(dummy_list[0]);
                tmp = kmalloc(size, GFP_KERNEL);
                if (tmp == NULL) {
                        printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
                               "host ldt list\n");
                        goto out_free;
                }
                host_ldt_entries = tmp;
        }

        for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
                if (ldt[i].a != 0 || ldt[i].b != 0)
                        host_ldt_entries[k++] = i;
        }
        host_ldt_entries[k] = -1;

out_free:
        free_pages((unsigned long)ldt, order);
}

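/*
 * Set up the LDT state of a new mm_context: without a parent mm, clear
 * the entries inherited from the host; when forking, duplicate the
 * parent's shadow LDT under its lock.
 */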
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
        struct user_desc desc;
        short * num_p;
        int i;
        long page, err = 0;
        void *addr = NULL;

        mutex_init(&new_mm->arch.ldt.lock);

        if (!from_mm) {
                memset(&desc, 0, sizeof(desc));
                /*
                 * Now we try to retrieve info about the ldt, we
                 * inherited from the host. All ldt-entries found
                 * will be reset in the following loop
                 */
                ldt_get_host_info();
                for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
                        desc.entry_number = *num_p;
                        err = write_ldt_entry(&new_mm->id, 1, &desc,
                                              &addr, *(num_p + 1) == -1);
                        if (err)
                                break;
                }
                new_mm->arch.ldt.entry_count = 0;

                goto out;
        }

        /*
         * Our local LDT is used to supply the data for
         * modify_ldt(READLDT), if PTRACE_LDT isn't available,
         * i.e., we have to use the stub for modify_ldt, which
         * can't handle the big read buffer of up to 64kB.
         */
        mutex_lock(&from_mm->arch.ldt.lock);
        if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
                memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
                       sizeof(new_mm->arch.ldt.u.entries));
        else {
                i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
                while (i-->0) {
                        page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
                        if (!page) {
                                err = -ENOMEM;
                                break;
                        }
                        new_mm->arch.ldt.u.pages[i] =
                                (struct ldt_entry *) page;
                        memcpy(new_mm->arch.ldt.u.pages[i],
                               from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
                }
        }
        new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
        mutex_unlock(&from_mm->arch.ldt.lock);

out:
        return err;
}

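/*
 * Release the page-sized LDT chunks; the direct entries array is
 * embedded in the mm_context and needs no freeing.
 */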
void free_ldt(struct mm_context *mm)
{
        int i;

        if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
                i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
                while (i-- > 0)
                        free_page((long) mm->arch.ldt.u.pages[i]);
        }
        mm->arch.ldt.entry_count = 0;
}

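/*
 * The modify_ldt system call entry point; all work is done by
 * do_modify_ldt_skas() above.
 */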
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
                unsigned long , bytecount)
{
        /* See non-um modify_ldt() for why we do this cast */
        return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
}