#include <asm/segment.h>
#define CPU_16BIT_STACK_SIZE 1024

#ifndef __ASSEMBLY__
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu.h>
extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
struct Xgt_desc_struct {
	unsigned short size;
	unsigned long address __attribute__((packed));
	unsigned short pad;
} __attribute__ ((packed));
extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))

#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))

#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
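/*
 * Usage sketch (illustrative, not from the original source): load_gdt,
 * load_idt, store_gdt and store_idt take a pointer to an Xgt_desc_struct,
 * while the tr/ldt variants operate on a 16-bit selector, e.g.
 *
 *	struct Xgt_desc_struct dt;
 *	store_gdt(&dt);			   dt.size/dt.address now describe the GDT
 *	load_gdt(&cpu_gdt_descr[cpu]);	   reload from this CPU's descriptor
 */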
/*
 * This is the ldt that every process will get unless we need
 * something other than this.
 */
extern struct desc_struct default_ldt[];
extern void set_intr_gate(unsigned int irq, void * addr);
#define _set_tssldt_desc(n,addr,limit,type) \
__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
	"movw %w1,2(%2)\n\t" \
	"rorl $16,%1\n\t" \
	"movb %b1,4(%2)\n\t" \
	"movb %4,5(%2)\n\t" \
	"movb $0,6(%2)\n\t" \
	"movb %h1,7(%2)\n\t" \
	"rorl $16,%1" \
	: "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
{
	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
}
#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
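/*
 * Sketch of an assumed caller (per-CPU init style code): install this
 * CPU's TSS with something like set_tss_desc(cpu, &per_cpu(init_tss, cpu))
 * before load_TR_desc() points the task register at it; the init_tss name
 * here is only illustrative.
 */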
static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
{
	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr,
		((size << 3)-1), 0x82);
}
#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
#define LDT_entry_b(info) \
	(((info)->base_addr & 0xff000000) | \
	(((info)->base_addr & 0x00ff0000) >> 16) | \
	((info)->limit & 0xf0000) | \
	(((info)->read_exec_only ^ 1) << 9) | \
	((info)->contents << 10) | \
	(((info)->seg_not_present ^ 1) << 15) | \
	((info)->seg_32bit << 22) | \
	((info)->limit_in_pages << 23) | \
	((info)->useable << 20) | \
	0x7000)
#define LDT_empty(info) (\
	(info)->base_addr == 0 && \
	(info)->limit == 0 && \
	(info)->contents == 0 && \
	(info)->read_exec_only == 1 && \
	(info)->seg_32bit == 0 && \
	(info)->limit_in_pages == 0 && \
	(info)->seg_not_present == 1 && \
	(info)->useable == 0 )
static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
{
	__u32 *lp = (__u32 *)((char *)ldt + entry*8);
	*lp = entry_a;
	*(lp+1) = entry_b;
}
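/*
 * Illustrative sketch (caller side, not part of this header): modify_ldt
 * style code is expected to combine the helpers above roughly as
 *
 *	__u32 a = LDT_entry_a(&info);
 *	__u32 b = LDT_entry_b(&info);
 *	write_ldt_entry(mm->context.ldt, info.entry_number, a, b);
 *
 * where "info" is a struct user_desc copied in from userspace.
 */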
#if TLS_SIZE != 24
# error update this code.
#endif
static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
#define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
	C(0); C(1); C(2);
#undef C
}
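/*
 * Sketch: the context-switch path is assumed to call this with the
 * incoming task's thread_struct, roughly load_TLS(&next_p->thread, cpu),
 * so the GDT_ENTRY_TLS_MIN..GDT_ENTRY_TLS_MAX slots follow the new task.
 */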
static inline void clear_LDT(void)
{
	int cpu = get_cpu();

	set_ldt_desc(cpu, &default_ldt[0], 5);
	load_LDT_desc();
	put_cpu();
}
/*
 * load one particular LDT into the current CPU
 */
static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
{
	void *segments = pc->ldt;
	int count = pc->size;

	if (likely(!count)) {
		segments = &default_ldt[0];
		count = 5;
	}

	set_ldt_desc(cpu, segments, count);
	load_LDT_desc();
}
static inline void load_LDT(mm_context_t *pc)
{
	int cpu = get_cpu();
	load_LDT_nolock(pc, cpu);
	put_cpu();
}
static inline unsigned long get_desc_base(unsigned long *desc)
{
	unsigned long base;
	base = ((desc[0] >> 16) & 0x0000ffff) |
		((desc[1] << 16) & 0x00ff0000) |
		(desc[1] & 0xff000000);
	return base;
}
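/*
 * Sketch (hypothetical caller): reassemble the 32-bit base of a GDT slot,
 * e.g.
 *	unsigned long base = get_desc_base((unsigned long *)
 *			&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN]);
 */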
#endif /* !__ASSEMBLY__ */