/*
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/cpu.h>
#include <kernel/arch/cpu.h>
#include <kernel/heap.h>
#include <kernel/vm.h>      /* assumed: declares vm_create_anonymous_region() used below */
#include <kernel/debug.h>
#include <kernel/smp.h>
#include <kernel/arch/i386/selector.h>
#include <kernel/arch/int.h>
#include <kernel/arch/i386/interrupts.h>
#include <newos/errors.h>
#include <string.h>         /* assumed: declares memset() used below */

#include <boot/stage2.h>

/* a few debug functions that get added to the kernel debugger menu */
static void dbg_in(int argc, char **argv);
static void dbg_out(int argc, char **argv);

static struct tss **tss;
static int *tss_loaded;

/* tss to switch to a special 'task' on the double fault handler */
static struct tss double_fault_tss;
static uint32 double_fault_stack[1024];

static desc_table *gdt = 0;

int arch_cpu_preboot_init(kernel_args *ka)
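{
    /* The original body is elided in this excerpt; a minimal sketch that only
       reports success is assumed here. */
    return 0;
}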

int arch_cpu_init(kernel_args *ka)
{
    setup_system_time(ka->arch_args.system_time_cv_factor);
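
    /* The remainder of the original body is elided in this excerpt; assume it
       ends by reporting success. */
    return 0;
}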

int arch_cpu_init2(kernel_args *ka)
{
    unsigned int i;
    struct tss_descriptor *tss_d;

    // account for the segment descriptors
    gdt = (desc_table *)ka->arch_args.vir_gdt;
    vm_create_anonymous_region(vm_get_kernel_aspace_id(), "gdt", (void **)&gdt,
        REGION_ADDR_EXACT_ADDRESS, PAGE_SIZE, REGION_WIRING_WIRED_ALREADY, LOCK_RW|LOCK_KERNEL);

    i386_selector_init(gdt); // pass the new gdt

    tss = kmalloc(sizeof(struct tss *) * ka->num_cpus);
    if(tss == NULL)
        panic("arch_cpu_init2: could not allocate buffer for tss pointers\n");

    tss_loaded = kmalloc(sizeof(int) * ka->num_cpus);
    if(tss_loaded == NULL)
        panic("arch_cpu_init2: could not allocate buffer for tss booleans\n");

    memset(tss_loaded, 0, sizeof(int) * ka->num_cpus);

    for(i = 0; i < ka->num_cpus; i++) {
        char tss_name[16];
        region_id rid;

        sprintf(tss_name, "tss%d", i);
        rid = vm_create_anonymous_region(vm_get_kernel_aspace_id(), tss_name, (void **)&tss[i],
            REGION_ADDR_ANY_ADDRESS, PAGE_SIZE, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
        if(rid < 0)
            panic("arch_cpu_init2: unable to create region for tss\n");

        memset(tss[i], 0, sizeof(struct tss));
        tss[i]->ss0 = KERNEL_DATA_SEG;

        // add TSS descriptor for this new TSS
        tss_d = (struct tss_descriptor *)&gdt[6 + i];
        tss_d->limit_00_15 = sizeof(struct tss) & 0xffff;
        tss_d->limit_19_16 = 0; // not this long
        tss_d->base_00_15 = (addr_t)tss[i] & 0xffff;
        tss_d->base_23_16 = ((addr_t)tss[i] >> 16) & 0xff;
        tss_d->base_31_24 = (addr_t)tss[i] >> 24;
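        // note: the remaining descriptor fields are set here in the original but
        // are elided from this excerpt; at minimum the type must mark an available
        // 32-bit TSS, mirroring the double fault descriptor set up below
        tss_d->type = 0x9; // tss descriptor, not busy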
        tss_d->granularity = 1;
    }

    /* set up the double fault tss */
    memset(&double_fault_tss, 0, sizeof(double_fault_tss));
    double_fault_tss.sp0 = (uint32)double_fault_stack + sizeof(double_fault_stack);
    double_fault_tss.ss0 = KERNEL_DATA_SEG;
    read_cr3(double_fault_tss.cr3); // copy the current cr3 to the double fault cr3
    double_fault_tss.eip = (uint32)&trap8;
    double_fault_tss.es = KERNEL_DATA_SEG;
    double_fault_tss.cs = KERNEL_CODE_SEG;
    double_fault_tss.ss = KERNEL_DATA_SEG;
    double_fault_tss.ds = KERNEL_DATA_SEG;
    double_fault_tss.fs = KERNEL_DATA_SEG;
    double_fault_tss.gs = KERNEL_DATA_SEG;
    double_fault_tss.ldt_seg_selector = KERNEL_DATA_SEG;

    tss_d = (struct tss_descriptor *)&gdt[5];
    tss_d->limit_00_15 = sizeof(struct tss) & 0xffff;
    tss_d->limit_19_16 = 0; // not this long
    tss_d->base_00_15 = (addr_t)&double_fault_tss & 0xffff;
    tss_d->base_23_16 = ((addr_t)&double_fault_tss >> 16) & 0xff;
    tss_d->base_31_24 = (addr_t)&double_fault_tss >> 24;
    tss_d->type = 0x9; // tss descriptor, not busy
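    // note: as above, the remaining descriptor fields (privilege level, present
    // bit, etc.) are set in the original but are elided from this excerpt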
    tss_d->granularity = 1;

    i386_set_task_gate(8, DOUBLE_FAULT_TSS);

    // set up a few debug commands (in, out)
    dbg_add_command(&dbg_in, "in", "read I/O port");
    dbg_add_command(&dbg_out, "out", "write I/O port");

desc_table *i386_get_gdt(void)
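{
    /* minimal body, assuming the accessor just hands back the static gdt
       pointer set up in arch_cpu_init2() */
    return gdt;
}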

void i386_set_kstack(addr_t kstack)
{
    int curr_cpu = smp_get_current_cpu();

//  dprintf("i386_set_kstack: kstack 0x%x, cpu %d\n", kstack, curr_cpu);
    if(tss_loaded[curr_cpu] == 0) {
        short seg = (TSS + 8*curr_cpu);
159 "ltr %%ax;" : : "r" (seg
) : "eax");
160 tss_loaded
[curr_cpu
] = 1;

    tss[curr_cpu]->sp0 = kstack;
//  dprintf("done\n");
}

void arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
    for(; start < end; start += PAGE_SIZE) {
        invalidate_TLB(start);
    }
}

void arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
    int i;

    for(i = 0; i < num_pages; i++) {
        invalidate_TLB(pages[i]);
    }
}

int arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *fault_handler)
{
    char *tmp = (char *)to;
    char *s = (char *)from;

    *fault_handler = (addr_t)&&error;
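
    /* The copy loop and success path are elided in this excerpt; a plain
       byte-wise copy is assumed, with the fault handler cleared on both paths. */
    while(size--)
        *tmp++ = *s++;

    *fault_handler = 0;
    return 0;

error:
    *fault_handler = 0;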
    return ERR_VM_BAD_USER_MEMORY;
}

int arch_cpu_user_strcpy(char *to, const char *from, addr_t *fault_handler)
{
    *fault_handler = (addr_t)&&error;

    while((*to++ = *from++) != '\0')
        ;
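
    /* Success and error paths are elided in this excerpt; assume the fault
       handler is cleared on both, as in arch_cpu_user_memcpy() above. */
    *fault_handler = 0;
    return 0;

error:
    *fault_handler = 0;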
    return ERR_VM_BAD_USER_MEMORY;
}

int arch_cpu_user_strncpy(char *to, const char *from, size_t size, addr_t *fault_handler)
{
    *fault_handler = (addr_t)&&error;

    while(size-- && (*to++ = *from++) != '\0')
        ;
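
    /* Success and error paths are elided in this excerpt; assumed to mirror
       arch_cpu_user_strcpy() above. */
    *fault_handler = 0;
    return 0;

error:
    *fault_handler = 0;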
    return ERR_VM_BAD_USER_MEMORY;
}

int arch_cpu_user_memset(void *s, char c, size_t count, addr_t *fault_handler)
{
    char *xs = (char *)s;

    *fault_handler = (addr_t)&&error;
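
    /* The fill loop and success path are elided in this excerpt; a simple
       byte-wise fill is assumed. */
    while(count--)
        *xs++ = c;

    *fault_handler = 0;
    return 0;

error:
    *fault_handler = 0;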
    return ERR_VM_BAD_USER_MEMORY;
}

void arch_cpu_idle(void)
{
    switch(smp_get_num_cpus()) {
        case 0:
            panic("You need at least 1 CPU to run NewOS\n");

void arch_cpu_sync_icache(void *address, size_t len)
{
    // instruction cache is always consistent on x86
}

static void dbg_in(int argc, char **argv)
{
    int value;
    int port;

    if(argc < 3) {
        dprintf("not enough args\nusage: %s (1|2|4) port\n", argv[0]);
        return;
    }

    port = atoul(argv[2]);
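    /* The width dispatch is elided in this excerpt; this sketch assumes the
       in8()/in16()/in32() port I/O helpers used elsewhere in the i386 code. */
    switch(argv[1][0]) {
        case '1':
            value = in8(port);
            break;
        case '2':
            value = in16(port);
            break;
        case '4':
            value = in32(port);
            break;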
290 dprintf("invalid width argument\n");
293 dprintf("I/O port 0x%x = 0x%x\n", port
, value
);

static void dbg_out(int argc, char **argv)
{
    int value;
    int port;

    if(argc < 4) {
        dprintf("not enough args\nusage: %s (1|2|4) port value\n", argv[0]);
        return;
    }

    port = atoul(argv[2]);
    value = atoul(argv[3]);
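    /* As in dbg_in() above, the width dispatch is elided; out8()/out16()/out32()
       and their (value, port) argument order are assumed here. */
    switch(argv[1][0]) {
        case '1':
            out8(value, port);
            break;
        case '2':
            out16(value, port);
            break;
        case '4':
            out32(value, port);
            break;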
323 dprintf("invalid width argument\n");
326 dprintf("writing 0x%x to I/O port 0x%x\n", value
, port
);