First pass at x86_64. Mostly consists of branching i386 stuff over.
[newos.git] / kernel / arch / x86_64 / arch_cpu.c
blob89d97c7219bccafcc5df1017d18257cc04684147
1 /*
2 ** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
3 ** Distributed under the terms of the NewOS License.
4 */
5 #include <kernel/kernel.h>
6 #include <kernel/cpu.h>
7 #include <kernel/arch/cpu.h>
8 #include <kernel/heap.h>
9 #include <kernel/vm.h>
10 #include <kernel/debug.h>
11 #include <kernel/smp.h>
12 #include <kernel/debug.h>
13 #include <kernel/arch/i386/selector.h>
14 #include <kernel/arch/int.h>
15 #include <kernel/arch/i386/interrupts.h>
16 #include <newos/errors.h>
18 #include <boot/stage2.h>
20 #include <string.h>
21 #include <stdio.h>
22 #include <stdlib.h>
/* a few debug functions that get added to the kernel debugger menu */
static void dbg_in(int argc, char **argv);
static void dbg_out(int argc, char **argv);

/* per-cpu TSS pointers, and a per-cpu flag recording whether that cpu has
 * loaded its task register yet (set lazily in i386_set_kstack) */
static struct tss **tss;
static int *tss_loaded;

/* tss to switch to a special 'task' on the double fault handler */
static struct tss double_fault_tss;
static uint32 double_fault_stack[1024];

/* the kernel's global descriptor table; pointed at the boot gdt and then
 * wrapped in a vm region during arch_cpu_init2 */
static desc_table *gdt = 0;
37 int arch_cpu_preboot_init(kernel_args *ka)
39 write_dr3(0);
40 return 0;
43 int arch_cpu_init(kernel_args *ka)
45 setup_system_time(ka->arch_args.system_time_cv_factor);
47 return 0;
50 int arch_cpu_init2(kernel_args *ka)
52 region_id rid;
53 struct tss_descriptor *tss_d;
54 unsigned int i;
56 // account for the segment descriptors
57 gdt = (desc_table *)ka->arch_args.vir_gdt;
58 vm_create_anonymous_region(vm_get_kernel_aspace_id(), "gdt", (void **)&gdt,
59 REGION_ADDR_EXACT_ADDRESS, PAGE_SIZE, REGION_WIRING_WIRED_ALREADY, LOCK_RW|LOCK_KERNEL);
61 i386_selector_init( gdt ); // pass the new gdt
63 tss = kmalloc(sizeof(struct tss *) * ka->num_cpus);
64 if(tss == NULL) {
65 panic("arch_cpu_init2: could not allocate buffer for tss pointers\n");
66 return ERR_NO_MEMORY;
69 tss_loaded = kmalloc(sizeof(int) * ka->num_cpus);
70 if(tss == NULL) {
71 panic("arch_cpu_init2: could not allocate buffer for tss booleans\n");
72 return ERR_NO_MEMORY;
74 memset(tss_loaded, 0, sizeof(int) * ka->num_cpus);
76 for(i=0; i<ka->num_cpus; i++) {
77 char tss_name[16];
79 sprintf(tss_name, "tss%d", i);
80 rid = vm_create_anonymous_region(vm_get_kernel_aspace_id(), tss_name, (void **)&tss[i],
81 REGION_ADDR_ANY_ADDRESS, PAGE_SIZE, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
82 if(rid < 0) {
83 panic("arch_cpu_init2: unable to create region for tss\n");
84 return ERR_NO_MEMORY;
87 memset(tss[i], 0, sizeof(struct tss));
88 tss[i]->ss0 = KERNEL_DATA_SEG;
90 // add TSS descriptor for this new TSS
91 tss_d = (struct tss_descriptor *)&gdt[6 + i];
92 tss_d->limit_00_15 = sizeof(struct tss) & 0xffff;
93 tss_d->limit_19_16 = 0; // not this long
94 tss_d->base_00_15 = (addr_t)tss[i] & 0xffff;
95 tss_d->base_23_16 = ((addr_t)tss[i] >> 16) & 0xff;
96 tss_d->base_31_24 = (addr_t)tss[i] >> 24;
97 tss_d->type = 0x9;
98 tss_d->zero = 0;
99 tss_d->dpl = 0;
100 tss_d->present = 1;
101 tss_d->avail = 0;
102 tss_d->zero1 = 0;
103 tss_d->zero2 = 1;
104 tss_d->granularity = 1;
108 /* set up the double fault tss */
109 memset(&double_fault_tss, 0, sizeof(double_fault_tss));
110 double_fault_tss.sp0 = (uint32)double_fault_stack + sizeof(double_fault_stack);
111 double_fault_tss.ss0 = KERNEL_DATA_SEG;
112 read_cr3(double_fault_tss.cr3); // copy the current cr3 to the double fault cr3
113 double_fault_tss.eip = (uint32)&trap8;
114 double_fault_tss.es = KERNEL_DATA_SEG;
115 double_fault_tss.cs = KERNEL_CODE_SEG;
116 double_fault_tss.ss = KERNEL_DATA_SEG;
117 double_fault_tss.ds = KERNEL_DATA_SEG;
118 double_fault_tss.fs = KERNEL_DATA_SEG;
119 double_fault_tss.gs = KERNEL_DATA_SEG;
120 double_fault_tss.ldt_seg_selector = KERNEL_DATA_SEG;
122 tss_d = (struct tss_descriptor *)&gdt[5];
123 tss_d->limit_00_15 = sizeof(struct tss) & 0xffff;
124 tss_d->limit_19_16 = 0; // not this long
125 tss_d->base_00_15 = (addr_t)&double_fault_tss & 0xffff;
126 tss_d->base_23_16 = ((addr_t)&double_fault_tss >> 16) & 0xff;
127 tss_d->base_31_24 = (addr_t)&double_fault_tss >> 24;
128 tss_d->type = 0x9; // tss descriptor, not busy
129 tss_d->zero = 0;
130 tss_d->dpl = 0;
131 tss_d->present = 1;
132 tss_d->avail = 0;
133 tss_d->zero1 = 0;
134 tss_d->zero2 = 1;
135 tss_d->granularity = 1;
137 i386_set_task_gate(8, DOUBLE_FAULT_TSS);
139 // set up a few debug commands (in, out)
140 dbg_add_command(&dbg_in, "in", "read I/O port");
141 dbg_add_command(&dbg_out, "out", "write I/O port");
143 return 0;
146 desc_table *i386_get_gdt(void)
148 return gdt;
/*
 * Point the current cpu's TSS at the given kernel stack, so the next
 * ring transition on this cpu uses it as sp0.  The first time each cpu
 * comes through here it also loads its task register with that cpu's
 * TSS selector (selectors sit 8 bytes apart in the gdt).
 */
void i386_set_kstack(addr_t kstack)
{
	int curr_cpu = smp_get_current_cpu();

//	dprintf("i386_set_kstack: kstack 0x%x, cpu %d\n", kstack, curr_cpu);
	if(tss_loaded[curr_cpu] == 0) {
		short seg = (TSS + 8*curr_cpu);
		// load the task register with this cpu's TSS selector
		asm("movw %0, %%ax;"
			"ltr %%ax;" : : "r" (seg) : "eax");
		tss_loaded[curr_cpu] = 1;
	}

	tss[curr_cpu]->sp0 = kstack;
//	dprintf("done\n");
}
167 void arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
169 for(; start < end; start += PAGE_SIZE) {
170 invalidate_TLB(start);
174 void arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
176 int i;
177 for(i=0; i<num_pages; i++) {
178 invalidate_TLB(pages[i]);
182 int arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *fault_handler)
184 char *tmp = (char *)to;
185 char *s = (char *)from;
187 *fault_handler = (addr_t)&&error;
189 while(size--)
190 *tmp++ = *s++;
192 *fault_handler = 0;
194 return 0;
195 error:
196 *fault_handler = 0;
197 return ERR_VM_BAD_USER_MEMORY;
200 int arch_cpu_user_strcpy(char *to, const char *from, addr_t *fault_handler)
202 *fault_handler = (addr_t)&&error;
204 while((*to++ = *from++) != '\0')
207 *fault_handler = 0;
209 return 0;
210 error:
211 *fault_handler = 0;
212 return ERR_VM_BAD_USER_MEMORY;
215 int arch_cpu_user_strncpy(char *to, const char *from, size_t size, addr_t *fault_handler)
217 *fault_handler = (addr_t)&&error;
219 while(size-- && (*to++ = *from++) != '\0')
222 *fault_handler = 0;
224 return 0;
225 error:
226 *fault_handler = 0;
227 return ERR_VM_BAD_USER_MEMORY;
230 int arch_cpu_user_memset(void *s, char c, size_t count, addr_t *fault_handler)
232 char *xs = (char *) s;
234 *fault_handler = (addr_t)&&error;
236 while (count--)
237 *xs++ = c;
239 *fault_handler = 0;
241 return 0;
242 error:
243 *fault_handler = 0;
244 return ERR_VM_BAD_USER_MEMORY;
/*
 * Called when there is nothing to run.  On a uniprocessor machine we can
 * hlt until the next interrupt; with more than one cpu we just return to
 * the caller's idle loop.  NOTE(review): presumably hlt is skipped on SMP
 * so a cpu doesn't sleep through work queued by another cpu — confirm.
 */
void arch_cpu_idle(void)
{
	switch(smp_get_num_cpus()) {
		case 0:
			panic("You need at least 1 CPU to run NewOS\n");
			/* fallthrough — panic is expected not to return */
		case 1:
			asm("hlt");
			/* fallthrough */
		default:
			break;
	}
}
/*
 * No-op on x86: the hardware keeps the instruction cache coherent with
 * data writes, so there is nothing to flush after patching code.
 */
void arch_cpu_sync_icache(void *address, size_t len)
{
	// instruction cache is always consistent on x86
}
/*
 * Debugger command: read an I/O port.
 * usage: in (1|2|4) port — argv[1] selects the access width, argv[2] the port.
 */
static void dbg_in(int argc, char **argv)
{
	int value;
	int port;

	// BUGFIX: we dereference argv[2] below, so we need at least 3 args
	// (the old check of 'argc < 2' allowed a read past the end of argv)
	if(argc < 3) {
		dprintf("not enough args\nusage: %s (1|2|4) port\n", argv[0]);
		return;
	}

	port = atoul(argv[2]);

	switch(argv[1][0]) {
		case '1':
		case 'b':
			value = in8(port);
			break;
		case '2':
		case 'h':
			value = in16(port);
			break;
		case '4':
		case 'w':
			value = in32(port);
			break;
		default:
			dprintf("invalid width argument\n");
			return;
	}

	dprintf("I/O port 0x%x = 0x%x\n", port, value);
}
/*
 * Debugger command: write an I/O port.
 * usage: out (1|2|4) port value — argv[1] selects the access width,
 * argv[2] the port, argv[3] the value to write.
 */
static void dbg_out(int argc, char **argv)
{
	int value;
	int port;

	// BUGFIX: we dereference argv[3] below, so we need at least 4 args
	// (the old check of 'argc < 3' allowed a read past the end of argv)
	if(argc < 4) {
		dprintf("not enough args\nusage: %s (1|2|4) port value\n", argv[0]);
		return;
	}

	port = atoul(argv[2]);
	value = atoul(argv[3]);

	switch(argv[1][0]) {
		case '1':
		case 'b':
			out8(value, port);
			break;
		case '2':
		case 'h':
			out16(value, port);
			break;
		case '4':
		case 'w':
			out32(value, port);
			break;
		default:
			dprintf("invalid width argument\n");
			return;
	}

	dprintf("writing 0x%x to I/O port 0x%x\n", value, port);
}