#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/user.h>
#include <linux/regset.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/proto.h>

#include "tls.h"
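
/*
 * The GDT_ENTRY_TLS_MIN..GDT_ENTRY_TLS_MAX slots of the GDT hold the
 * per-thread TLS segments; they back the set_thread_area() and
 * get_thread_area() syscalls and the ptrace TLS regset below.
 */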

/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(&t->tls_array[idx]))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}
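
/*
 * Sanity-check a descriptor coming from userspace before it is installed
 * in the TLS array: returns true if it is acceptable, false otherwise.
 */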
static bool tls_desc_okay(const struct user_desc *info)
{
	/*
	 * For historical reasons (i.e. no one ever documented how any
	 * of the segmentation APIs work), user programs can and do
	 * assume that a struct user_desc that's all zeros except for
	 * entry_number means "no segment at all".  This never actually
	 * worked.  In fact, up to Linux 3.19, a struct user_desc like
	 * this would create a 16-bit read-write segment with base and
	 * limit both equal to zero.
	 *
	 * That was close enough to "no segment at all" until we
	 * hardened this function to disallow 16-bit TLS segments.  Fix
	 * it up by interpreting these zeroed segments the way that they
	 * were almost certainly intended to be interpreted.
	 *
	 * The correct way to ask for "no segment at all" is to specify
	 * a user_desc that satisfies LDT_empty.  To keep everything
	 * working, we accept both.
	 *
	 * Note that there's a similar kludge in modify_ldt -- look at
	 * the distinction between modes 1 and 0x11.
	 */
	if (LDT_empty(info) || LDT_zero(info))
		return true;

	/*
	 * espfix is required for 16-bit data segments, but espfix
	 * only works for LDT segments.
	 */
	if (!info->seg_32bit)
		return false;

	/* Only allow data segments in the TLS array. */
	if (info->contents > 1)
		return false;

	/*
	 * Non-present segments with DPL 3 present an interesting attack
	 * surface.  The kernel should handle such segments correctly,
	 * but TLS is very difficult to protect in a sandbox, so prevent
	 * such segments from being created.
	 *
	 * If userspace needs to remove a TLS entry, it can still delete
	 * it outright.
	 */
	if (info->seg_not_present)
		return false;

	return true;
}
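
/*
 * Install @n descriptors from @info into @p's TLS array, starting at GDT
 * entry @idx, and reload the live TLS GDT slots if @p is the current task.
 */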
static void set_tls_desc(struct task_struct *p, int idx,
			 const struct user_desc *info, int n)
{
	struct thread_struct *t = &p->thread;
	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
	int cpu;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	while (n-- > 0) {
		if (LDT_empty(info) || LDT_zero(info))
			desc->a = desc->b = 0;
		else
			fill_ldt(desc, info);
		++info;
		++desc;
	}

	if (t == &current->thread)
		load_TLS(t, cpu);

	put_cpu();
}

/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct user_desc info;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	if (!tls_desc_okay(&info))
		return -EINVAL;

	if (idx == -1)
		idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1 && can_allocate) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	set_tls_desc(p, idx, &info, 1);

	return 0;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info)
{
	return do_set_thread_area(current, -1, u_info, 1);
}
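
/*
 * Userspace view (illustrative sketch only, not part of the kernel build):
 * passing entry_number == -1 asks the kernel to pick a free slot, and the
 * chosen GDT index is written back into u_info->entry_number.  A 32-bit
 * program would then typically load %gs with that selector.  "tls_block"
 * below is a hypothetical per-thread buffer:
 *
 *	struct user_desc desc = {
 *		.entry_number   = -1,
 *		.base_addr      = (unsigned long)tls_block,
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *		.useable        = 1,
 *	};
 *	syscall(SYS_set_thread_area, &desc);
 *	asm volatile("movw %w0, %%gs" :: "q" (desc.entry_number * 8 + 3));
 */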

/*
 * Get the current Thread-Local Storage area:
 */
static void fill_user_desc(struct user_desc *info, int idx,
			   const struct desc_struct *desc)
{
	memset(info, 0, sizeof(*info));
	info->entry_number = idx;
	info->base_addr = get_desc_base(desc);
	info->limit = get_desc_limit(desc);
	info->seg_32bit = desc->d;
	info->contents = desc->type >> 2;
	info->read_exec_only = !(desc->type & 2);
	info->limit_in_pages = desc->g;
	info->seg_not_present = !desc->p;
	info->useable = desc->avl;
#ifdef CONFIG_X86_64
	info->lm = desc->l;
#endif
}
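
/*
 * Fetch one TLS descriptor: idx == -1 means "use the entry_number that the
 * caller passed in via u_info".
 */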
int do_get_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info)
{
	struct user_desc info;

	if (idx == -1 && get_user(idx, &u_info->entry_number))
		return -EFAULT;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	fill_user_desc(&info, idx,
		       &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info)
{
	return do_get_thread_area(current, -1, u_info);
}
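
/*
 * The regset interface below exposes the same TLS array to ptrace
 * (PTRACE_GETREGSET/PTRACE_SETREGSET with NT_386_TLS) and to core dumps,
 * as an array of struct user_desc entries.
 */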
int regset_tls_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	struct thread_struct *t = &target->thread;
	int n = GDT_ENTRY_TLS_ENTRIES;
	while (n > 0 && desc_empty(&t->tls_array[n - 1]))
		--n;
	return n;
}

int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	const struct desc_struct *tls;

	if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
	    (pos % sizeof(struct user_desc)) != 0 ||
	    (count % sizeof(struct user_desc)) != 0)
		return -EINVAL;

	pos /= sizeof(struct user_desc);
	count /= sizeof(struct user_desc);

	tls = &target->thread.tls_array[pos];

	if (kbuf) {
		struct user_desc *info = kbuf;
		while (count-- > 0)
			fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
				       tls++);
	} else {
		struct user_desc __user *u_info = ubuf;
		while (count-- > 0) {
			struct user_desc info;
			fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
			if (__copy_to_user(u_info++, &info, sizeof(info)))
				return -EFAULT;
		}
	}

	return 0;
}
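
/*
 * Regset writer: validates each incoming user_desc with tls_desc_okay()
 * before committing the whole block with set_tls_desc().  @pos and @count
 * are in bytes and must be multiples of sizeof(struct user_desc).
 */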
int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
	const struct user_desc *info;
	int i;

	if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
	    (pos % sizeof(struct user_desc)) != 0 ||
	    (count % sizeof(struct user_desc)) != 0)
		return -EINVAL;

	if (kbuf)
		info = kbuf;
	else if (__copy_from_user(infobuf, ubuf, count))
		return -EFAULT;
	else
		info = infobuf;

	for (i = 0; i < count / sizeof(struct user_desc); i++)
		if (!tls_desc_okay(info + i))
			return -EINVAL;

	set_tls_desc(target,
		     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
		     info, count / sizeof(struct user_desc));

	return 0;
}
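
/*
 * Example (debugger side, illustrative sketch only): reading a stopped
 * tracee's TLS slots through the regset interface:
 *
 *	struct user_desc tls[GDT_ENTRY_TLS_ENTRIES];
 *	struct iovec iov = { .iov_base = tls, .iov_len = sizeof(tls) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_386_TLS, &iov);
 *
 * Error handling is omitted; "pid" is assumed to be an attached, stopped
 * tracee.
 */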