/*
 * arch/x86/include/asm/desc.h -- descriptor table helpers
 * (Linux 3.11-rc3)
 */
#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H

#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>

#include <linux/smp.h>
#include <linux/percpu.h>
static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
	desc->limit0 = info->limit & 0x0ffff;

	desc->base0 = (info->base_addr & 0x0000ffff);
	desc->base1 = (info->base_addr & 0x00ff0000) >> 16;

	desc->type = (info->read_exec_only ^ 1) << 1;
	desc->type |= info->contents << 2;

	desc->s = 1;
	desc->dpl = 0x3;
	desc->p = info->seg_not_present ^ 1;
	desc->limit = (info->limit & 0xf0000) >> 16;
	desc->avl = info->useable;
	desc->d = info->seg_32bit;
	desc->g = info->limit_in_pages;

	desc->base2 = (info->base_addr & 0xff000000) >> 24;
	/*
	 * Don't allow setting of the lm bit. It would confuse
	 * user_64bit_mode and would get overridden by sysret anyway.
	 */
	desc->l = 0;
}
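
/*
 * Illustrative sketch, not part of the original header: how a flat
 * 32-bit TLS data segment would be packed through fill_ldt() on the
 * set_thread_area()/modify_ldt() paths. The field values mirror a
 * typical glibc TLS request; the helper name is hypothetical.
 */
static inline void example_fill_flat_tls(struct desc_struct *desc)
{
	struct user_desc info = {
		.entry_number	 = -1,		/* let the kernel pick the slot */
		.base_addr	 = 0,		/* flat base */
		.limit		 = 0xfffff,	/* 4 GiB with limit_in_pages */
		.seg_32bit	 = 1,
		.contents	 = 0,		/* data, grows up */
		.read_exec_only	 = 0,
		.limit_in_pages	 = 1,
		.seg_not_present = 0,
		.useable	 = 1,
	};

	fill_ldt(desc, &info);	/* yields type 0b010 (r/w data), dpl 3, p 1 */
}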
extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
extern struct desc_ptr debug_idt_descr;
extern gate_desc debug_idt_table[];
struct gdt_page {
	struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));

DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);

static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
	return per_cpu(gdt_page, cpu).gdt;
}
#ifdef CONFIG_X86_64

static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate->offset_low	= PTR_LOW(func);
	gate->segment		= __KERNEL_CS;
	gate->ist		= ist;
	gate->p			= 1;
	gate->dpl		= dpl;
	gate->zero0		= 0;
	gate->zero1		= 0;
	gate->type		= type;
	gate->offset_middle	= PTR_MIDDLE(func);
	gate->offset_high	= PTR_HIGH(func);
}

#else
static inline void pack_gate(gate_desc *gate, unsigned char type,
			     unsigned long base, unsigned dpl, unsigned flags,
			     unsigned short seg)
{
	gate->a = (seg << 16) | (base & 0xffff);
	gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}
#endif
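
/*
 * Worked example, illustrative only: on 32-bit, packing a present
 * ring-0 interrupt gate for a handler at 0x12345678 through
 * __KERNEL_CS gives
 *
 *	a = (__KERNEL_CS << 16) | 0x5678
 *	b = 0x12340000 | ((0x80 | GATE_INTERRUPT) << 8)
 *
 * i.e. present bit 0x80 and DPL in bits 5-6 of the access byte, with
 * the handler offset split across the two words.
 */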
static inline int desc_empty(const void *ptr)
{
	const u32 *desc = ptr;

	return !(desc[0] | desc[1]);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define load_TR_desc()				native_load_tr_desc()
#define load_gdt(dtr)				native_load_gdt(dtr)
#define load_idt(dtr)				native_load_idt(dtr)
#define load_tr(tr)				asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt)				asm volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr)				native_store_gdt(dtr)
#define store_idt(dtr)				native_store_idt(dtr)
#define store_tr(tr)				(tr = native_store_tr())

#define load_TLS(t, cpu)			native_load_tls(t, cpu)
#define set_ldt					native_set_ldt

#define write_ldt_entry(dt, entry, desc)	native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type)	native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g)		native_write_idt_entry(dt, entry, g)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif	/* CONFIG_PARAVIRT */

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
	memcpy(&idt[entry], gate, sizeof(*gate));
}

static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
	memcpy(&ldt[entry], desc, 8);
}

static inline void
native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
	unsigned int size;

	switch (type) {
	case DESC_TSS:	size = sizeof(tss_desc);	break;
	case DESC_LDT:	size = sizeof(ldt_desc);	break;
	default:	size = sizeof(*gdt);		break;
	}

	memcpy(&gdt[entry], desc, size);
}
static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
				   unsigned long limit, unsigned char type,
				   unsigned char flags)
{
	desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
	desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
		  (limit & 0x000f0000) | ((type & 0xff) << 8) |
		  ((flags & 0xf) << 20);
	desc->p = 1;
}
static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
	struct ldttss_desc64 *desc = d;

	memset(desc, 0, sizeof(*desc));

	desc->limit0	= size & 0xFFFF;
	desc->base0	= PTR_LOW(addr);
	desc->base1	= PTR_MIDDLE(addr) & 0xFF;
	desc->type	= type;
	desc->p		= 1;
	desc->limit1	= (size >> 16) & 0xF;
	desc->base2	= (PTR_MIDDLE(addr) >> 8) & 0xFF;
	desc->base3	= PTR_HIGH(addr);
#else
	pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
}
static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
	struct desc_struct *d = get_cpu_gdt_table(cpu);
	tss_desc tss;

	/*
	 * sizeof(unsigned long) coming from an extra "long" at the
	 * end of the iobitmap. See tss_struct definition in processor.h
	 *
	 * -1? seg base+limit should be pointing to the address of the
	 * last valid byte
	 */
	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
			      IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
			      sizeof(unsigned long) - 1);
	write_gdt_entry(d, entry, &tss, DESC_TSS);
}

#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
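
/*
 * Hedged usage sketch, not part of the original header: roughly what
 * the CPU bring-up path does with set_tss_desc(). It assumes the
 * per-CPU init_tss from <asm/processor.h> of this kernel era; the
 * helper name is hypothetical.
 */
static inline void example_install_tss(unsigned int cpu)
{
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu, t);	/* write the TSS descriptor into this CPU's GDT */
	load_TR_desc();		/* ltr: point the task register at GDT_ENTRY_TSS */
}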
static inline void native_set_ldt(const void *addr, unsigned int entries)
{
	if (likely(entries == 0))
		asm volatile("lldt %w0"::"q" (0));
	else {
		unsigned cpu = smp_processor_id();
		ldt_desc ldt;

		set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
				      entries * LDT_ENTRY_SIZE - 1);
		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
				&ldt, DESC_LDT);
		asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
	}
}
static inline void native_load_tr_desc(void)
{
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}

static inline void native_load_gdt(const struct desc_ptr *dtr)
{
	asm volatile("lgdt %0"::"m" (*dtr));
}

static inline void native_load_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0"::"m" (*dtr));
}

static inline void native_store_gdt(struct desc_ptr *dtr)
{
	asm volatile("sgdt %0":"=m" (*dtr));
}

static inline void native_store_idt(struct desc_ptr *dtr)
{
	asm volatile("sidt %0":"=m" (*dtr));
}

static inline unsigned long native_store_tr(void)
{
	unsigned long tr;

	asm volatile("str %0":"=r" (tr));

	return tr;
}
static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	unsigned int i;

	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
#define _LDT_empty(info)			\
	((info)->base_addr	== 0 &&		\
	 (info)->limit		== 0 &&		\
	 (info)->contents	== 0 &&		\
	 (info)->read_exec_only	== 1 &&		\
	 (info)->seg_32bit	== 0 &&		\
	 (info)->limit_in_pages	== 0 &&		\
	 (info)->seg_not_present == 1 &&	\
	 (info)->useable	== 0)

#ifdef CONFIG_X86_64
#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
#else
#define LDT_empty(info) (_LDT_empty(info))
#endif
static inline void clear_LDT(void)
{
	set_ldt(NULL, 0);
}

/*
 * load one particular LDT into the current CPU
 */
static inline void load_LDT_nolock(mm_context_t *pc)
{
	set_ldt(pc->ldt, pc->size);
}

static inline void load_LDT(mm_context_t *pc)
{
	preempt_disable();
	load_LDT_nolock(pc);
	preempt_enable();
}
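
/*
 * Hedged sketch, not part of the original header: roughly how
 * switch_mm() in <asm/mmu_context.h> uses these helpers, reloading
 * the LDT only when the outgoing and incoming mm disagree about it.
 * The helper name is hypothetical.
 */
static inline void example_switch_ldt(mm_context_t *prev, mm_context_t *next)
{
	/* lldt is expensive, so skip it when both tasks share one table */
	if (unlikely(prev->ldt != next->ldt))
		load_LDT_nolock(next);
}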
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
	desc->base0 = base & 0xffff;
	desc->base1 = (base >> 16) & 0xff;
	desc->base2 = (base >> 24) & 0xff;
}

static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
	return desc->limit0 | (desc->limit << 16);
}

static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
	desc->limit0 = limit & 0xffff;
	desc->limit = (limit >> 16) & 0xf;
}
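
/*
 * Illustrative round trip, not part of the original header: the
 * accessors above split a 32-bit base across base0/base1/base2 and a
 * 20-bit limit across limit0/limit. The example values are arbitrary.
 */
static inline void example_desc_roundtrip(struct desc_struct *desc)
{
	set_desc_base(desc, 0x12345678);	/* base0=0x5678 base1=0x34 base2=0x12 */
	set_desc_limit(desc, 0xfffff);		/* limit0=0xffff limit=0xf */

	BUG_ON(get_desc_base(desc) != 0x12345678);
	BUG_ON(get_desc_limit(desc) != 0xfffff);
}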
#ifdef CONFIG_X86_64
static inline void set_nmi_gate(int gate, void *addr)
{
	gate_desc s;

	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
	write_idt_entry(debug_idt_table, gate, &s);
}
#endif
#ifdef CONFIG_TRACING
extern struct desc_ptr trace_idt_descr;
extern gate_desc trace_idt_table[];
static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
{
	write_idt_entry(trace_idt_table, entry, gate);
}
#else
static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
{
}
#endif
static inline void _set_gate(int gate, unsigned type, void *addr,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate_desc s;

	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
	/*
	 * does not need to be atomic because it is only done once at
	 * setup time
	 */
	write_idt_entry(idt_table, gate, &s);
	write_trace_idt_entry(gate, &s);
}
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
static inline void set_intr_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
}
extern int first_system_vector;
/* used_vectors is a bitmap of the vectors that are not managed by the percpu vector_irq */
extern unsigned long used_vectors[];

static inline void alloc_system_vector(int vector)
{
	if (!test_bit(vector, used_vectors)) {
		set_bit(vector, used_vectors);
		if (first_system_vector > vector)
			first_system_vector = vector;
	} else {
		BUG();
	}
}
#ifdef CONFIG_TRACING
static inline void trace_set_intr_gate(unsigned int gate, void *addr)
{
	gate_desc s;

	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
	write_idt_entry(trace_idt_table, gate, &s);
}

static inline void __trace_alloc_intr_gate(unsigned int n, void *addr)
{
	trace_set_intr_gate(n, addr);
}
#else
static inline void trace_set_intr_gate(unsigned int gate, void *addr)
{
}

#define __trace_alloc_intr_gate(n, addr)
#endif
static inline void __alloc_intr_gate(unsigned int n, void *addr)
{
	set_intr_gate(n, addr);
}

#define alloc_intr_gate(n, addr)				\
	do {							\
		alloc_system_vector(n);				\
		__alloc_intr_gate(n, addr);			\
		__trace_alloc_intr_gate(n, trace_##addr);	\
	} while (0)
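
/*
 * Hedged usage note, not part of the original header: a system vector
 * is typically installed at boot along the lines of
 *
 *	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 *
 * which marks the vector in used_vectors, writes the IDT gate, and
 * (under CONFIG_TRACING) installs trace_reschedule_interrupt in the
 * trace IDT via the trace_##addr token paste. The vector and handler
 * names match this kernel era but are cited here as an assumption.
 */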
/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
}

static inline void set_system_trap_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
}

static inline void set_trap_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
}

static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
}

static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
}

static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
}
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u32, debug_idt_ctr);
static inline bool is_debug_idt_enabled(void)
{
	if (this_cpu_read(debug_idt_ctr))
		return true;

	return false;
}

static inline void load_debug_idt(void)
{
	load_idt((const struct desc_ptr *)&debug_idt_descr);
}
#else
static inline bool is_debug_idt_enabled(void)
{
	return false;
}

static inline void load_debug_idt(void)
{
}
#endif
#ifdef CONFIG_TRACING
extern atomic_t trace_idt_ctr;
static inline bool is_trace_idt_enabled(void)
{
	if (atomic_read(&trace_idt_ctr))
		return true;

	return false;
}

static inline void load_trace_idt(void)
{
	load_idt((const struct desc_ptr *)&trace_idt_descr);
}
#else
static inline bool is_trace_idt_enabled(void)
{
	return false;
}

static inline void load_trace_idt(void)
{
}
#endif
/*
 * load_current_idt() must be called with interrupts disabled to avoid
 * races. That way the IDT will always be set back to the expected
 * descriptor. It's also called when a CPU is being initialized; that
 * path doesn't need to disable interrupts, as nothing else should be
 * bothering the CPU then.
 */
static inline void load_current_idt(void)
{
	if (is_debug_idt_enabled())
		load_debug_idt();
	else if (is_trace_idt_enabled())
		load_trace_idt();
	else
		load_idt((const struct desc_ptr *)&idt_descr);
}
#endif /* _ASM_X86_DESC_H */