sbcl/smoofra.git, version 0.9.12.25: src/runtime/win32-os.c
/*
 * the Win32 incarnation of OS-dependent routines. See also
 * $(sbcl_arch)-win32-os.c
 *
 * This file (along with os.h) exports an OS-independent interface to
 * the operating system VM facilities. Surprise surprise, this
 * interface looks a lot like the Mach interface (but simpler in some
 * places). For some operating systems, a subset of these functions
 * will have to be emulated.
 *
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 *
 * This file was copied from the Linux version of the same, and
 * likely still has some linuxisms in it that haven't been eliminated
 * yet.
 */

#include <stdio.h>
#include <sys/param.h>
#include <sys/file.h>
#include <io.h>
#include "sbcl.h"
#include "./signal.h"
#include "os.h"
#include "arch.h"
#include "globals.h"
#include "sbcl.h"
#include "interrupt.h"
#include "interr.h"
#include "lispregs.h"
#include "runtime.h"
#include "monitor.h"
#include "alloc.h"
#include "genesis/primitive-objects.h"

#include <sys/types.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <unistd.h>
#include <shlobj.h>

#include <excpt.h>

#include "validate.h"
#include "thread.h"

size_t os_vm_page_size;

#include "gc.h"
#include "gencgc-internal.h"

#if 0
int linux_sparc_siginfo_bug = 0;
int linux_supports_futex=0;
#endif

/* The exception handling function looks like this: */
EXCEPTION_DISPOSITION handle_exception(EXCEPTION_RECORD *,
                                       struct lisp_exception_frame *,
                                       CONTEXT *,
                                       void *);

void *base_seh_frame;

static void *get_seh_frame(void)
{
    void* retval;
    asm volatile ("movl %%fs:0,%0": "=r" (retval));
    return retval;
}

static void set_seh_frame(void *frame)
{
    asm volatile ("movl %0,%%fs:0": : "r" (frame));
}

static struct lisp_exception_frame *find_our_seh_frame(void)
{
    struct lisp_exception_frame *frame = get_seh_frame();

    while (frame->handler != handle_exception)
        frame = frame->next_frame;

    return frame;
}

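/*
 * On Win32, fs:[0] holds the head of the current thread's structured
 * exception handler (SEH) chain: a singly-linked list of
 * {next_frame, handler} records that live on that thread's stack.
 * The two helpers above read and write that chain head directly, and
 * find_our_seh_frame() walks the chain until it reaches the frame
 * whose handler is our handle_exception(), i.e. the frame installed
 * by wos_install_interrupt_handlers() further below.
 */
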
#if 0
inline static void *get_stack_frame(void)
{
    void* retval;
    asm volatile ("movl %%ebp,%0": "=r" (retval));
    return retval;
}
#endif

void os_init(char *argv[], char *envp[])
{
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    os_vm_page_size = system_info.dwPageSize;

    base_seh_frame = get_seh_frame();
}

/*
 * So we have three fun scenarios here.
 *
 * First, we could be being called to reserve the memory areas
 * during initialization (prior to loading the core file).
 *
 * Second, we could be being called by the GC to commit a page
 * that has just been decommitted (for easy zero-fill).
 *
 * Third, we could be being called by create_thread_struct()
 * in order to create the sundry and various stacks.
 *
 * The third case is easy to pick out because it passes an
 * addr of 0.
 *
 * The second case is easy to pick out because it will be for
 * a range of memory that is MEM_RESERVE rather than MEM_FREE.
 *
 * The second case is also easy to implement, because we leave
 * the memory as reserved (since we do lazy commits).
 */
os_vm_address_t
os_validate(os_vm_address_t addr, os_vm_size_t len)
{
    MEMORY_BASIC_INFORMATION mem_info;

    if (!addr) {
        /* the simple case first */
        os_vm_address_t real_addr;
        if (!(real_addr = VirtualAlloc(addr, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE))) {
            perror("VirtualAlloc");
            return 0;
        }

        return real_addr;
    }

    if (!VirtualQuery(addr, &mem_info, sizeof mem_info)) {
        perror("VirtualQuery");
        return 0;
    }

    if ((mem_info.State == MEM_RESERVE) && (mem_info.RegionSize >= len)) return addr;

    if (mem_info.State == MEM_RESERVE) {
        fprintf(stderr, "validation of reserved space too short.\n");
        fflush(stderr);
    }

    if (!VirtualAlloc(addr, len, (mem_info.State == MEM_RESERVE) ? MEM_COMMIT : MEM_RESERVE,
                      PAGE_EXECUTE_READWRITE)) {
        perror("VirtualAlloc");
        return 0;
    }

    return addr;
}

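/*
 * In short: a zero addr asks for fresh committed memory (the thread
 * stack case); a non-zero addr over a region that is already
 * MEM_RESERVE and large enough is returned untouched, because the
 * actual commit happens lazily in handle_exception() when a page is
 * first faulted on; and a non-zero addr over MEM_FREE memory is only
 * reserved here, again leaving the commit to the fault handler.
 */
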
/*
 * For os_invalidate(), we merely decommit the memory rather than
 * freeing the address space. This loses when freeing per-thread
 * data and related memory since it leaks address space. It's not
 * too lossy, however, since the two scenarios I'm aware of are
 * fd-stream buffers, which are pooled rather than torched, and
 * thread information, which I hope to pool (since windows creates
 * threads at its own whim, and we probably want to be able to
 * have them call back without funky magic on the part of the user,
 * and full-on thread allocation is fairly heavyweight). Someone
 * will probably shoot me down on this with some pithy comment on
 * the use of (setf symbol-value) on a special variable. I'm happy
 * for them.
 */
void
os_invalidate(os_vm_address_t addr, os_vm_size_t len)
{
    if (!VirtualFree(addr, len, MEM_DECOMMIT)) {
        perror("VirtualFree");
    }
}

/*
 * os_map() is called to map a chunk of the core file into memory.
 *
 * Unfortunately, Windows semantics completely screw this up, so
 * we just add backing store from the swapfile to where the chunk
 * goes and read it up like a normal file. We could consider using
 * a lazy read (demand page) setup, but that would mean keeping an
 * open file pointer for the core indefinitely (and be one more
 * thing to maintain).
 */
os_vm_address_t
os_map(int fd, int offset, os_vm_address_t addr, os_vm_size_t len)
{
    os_vm_size_t count;

    fprintf(stderr, "os_map: %d, 0x%x, %p, 0x%x.\n", fd, offset, addr, len);
    fflush(stderr);

    if (!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE)) {
        perror("VirtualAlloc");
        lose("os_map: VirtualAlloc failure");
    }

    if (lseek(fd, offset, SEEK_SET) == -1) {
        lose("os_map: Seek failure.");
    }

    count = read(fd, addr, len);
    if (count != len) {
        fprintf(stderr, "expected 0x%x, read 0x%x.\n", len, count);
        lose("os_map: Failed to read enough bytes.");
    }

    return addr;
}

static DWORD os_protect_modes[8] = {
    PAGE_NOACCESS,
    PAGE_READONLY,
    PAGE_READWRITE,
    PAGE_READWRITE,
    PAGE_EXECUTE,
    PAGE_EXECUTE_READ,
    PAGE_EXECUTE_READWRITE,
    PAGE_EXECUTE_READWRITE,
};

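/*
 * The table above is indexed by the os_vm_prot_t bitmask passed to
 * os_protect(). Assuming the usual encoding from the runtime headers
 * (OS_VM_PROT_READ = 1, OS_VM_PROT_WRITE = 2, OS_VM_PROT_EXECUTE = 4),
 * e.g. OS_VM_PROT_READ|OS_VM_PROT_WRITE == 3 maps to PAGE_READWRITE.
 * Write access implies read access because Windows has no write-only
 * page protection.
 */
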
void
os_protect(os_vm_address_t address, os_vm_size_t length, os_vm_prot_t prot)
{
    DWORD old_prot;

    if (!VirtualProtect(address, length, os_protect_modes[prot], &old_prot)) {
        fprintf(stderr, "VirtualProtect failed, code 0x%lx.\n", GetLastError());
        fflush(stderr);
    }
}

/* FIXME: Now that FOO_END, rather than FOO_SIZE, is the fundamental
 * description of a space, we could probably punt this and just do
 * (FOO_START <= x && x < FOO_END) everywhere it's called. */
static boolean
in_range_p(os_vm_address_t a, lispobj sbeg, size_t slen)
{
    char* beg = (char*)((long)sbeg);
    char* end = (char*)((long)sbeg) + slen;
    char* adr = (char*)a;
    return (adr >= beg && adr < end);
}

boolean
is_valid_lisp_addr(os_vm_address_t addr)
{
    struct thread *th;
    if(in_range_p(addr, READ_ONLY_SPACE_START, READ_ONLY_SPACE_SIZE) ||
       in_range_p(addr, STATIC_SPACE_START, STATIC_SPACE_SIZE) ||
       in_range_p(addr, DYNAMIC_SPACE_START, DYNAMIC_SPACE_SIZE))
        return 1;
    for_each_thread(th) {
        if(((os_vm_address_t)th->control_stack_start <= addr) &&
           (addr < (os_vm_address_t)th->control_stack_end))
            return 1;
        if(in_range_p(addr, (unsigned long)th->binding_stack_start,
                      BINDING_STACK_SIZE))
            return 1;
    }
    return 0;
}

/*
 * any OS-dependent special low-level handling for signals
 */

/* A tiny bit of interrupt.c state we want our paws on. */
extern boolean internal_errors_enabled;

/*
 * FIXME: There is a potential problem with foreign code here.
 * If we are running foreign code instead of lisp code and an
 * exception occurs we arrange a call into Lisp. If the
 * foreign code has installed an exception handler, we run the
 * very great risk of throwing through their exception handler
 * without asking it to unwind. This is more a problem with
 * non-sigtrap (EXCEPTION_BREAKPOINT) exceptions, as they could
 * reasonably be expected to happen in foreign code. We need to
 * figure out the exception handler unwind semantics and adhere
 * to them (probably by abusing the Lisp unwind-protect system)
 * if we are going to handle this scenario correctly.
 *
 * A good explanation of the exception handling semantics is
 * http://win32assembly.online.fr/Exceptionhandling.html .
 * We will also need to handle this ourselves when foreign
 * code tries to unwind -us-.
 *
 * When unwinding through foreign code we should unwind the
 * Lisp stack to the entry from foreign code, then unwind the
 * foreign code stack to the entry from Lisp, then resume
 * unwinding in Lisp.
 */

EXCEPTION_DISPOSITION sigtrap_emulator(CONTEXT *context,
                                       struct lisp_exception_frame *exception_frame)
{
    if (*((char *)context->Eip + 1) == trap_ContextRestore) {
        /*
         * This is the cleanup for what is immediately below, and
         * for the generic exception handling further below. We
         * have to memcpy() the original context (emulated sigtrap
         * or normal exception) over our context and resume it.
         */
        memcpy(context, &exception_frame->context, sizeof(CONTEXT));
        return ExceptionContinueExecution;

    } else { /* Not a trap_ContextRestore, must be a sigtrap. */
        /* sigtrap_trampoline is defined in x86-assem.S. */
        extern void sigtrap_trampoline;

        /*
         * Unlike some other operating systems, Win32 leaves EIP
         * pointing to the breakpoint instruction.
         */
        context->Eip++;

        /*
         * We're not on an alternate stack like we would be in some
         * other operating systems, and we don't want to risk leaking
         * any important resources if we throw out of the sigtrap
         * handler, so we need to copy off our context to a "safe"
         * place and then monkey with the return EIP to point to a
         * trampoline which calls another function which copies the
         * context out to a really-safe place and then calls the real
         * sigtrap handler. When the real sigtrap handler returns, the
         * trampoline then contains another breakpoint with a code of
         * trap_ContextRestore (see above). Essentially the same
         * mechanism is used by the generic exception path. There is
         * a small window of opportunity between us copying the
         * context to the "safe" place and the sigtrap wrapper copying
         * it to the really-safe place (allocated in its stack frame)
         * during which the context can be smashed. The only scenario
         * I can come up with for this, however, involves a stack
         * overflow occurring at just the wrong time (which makes one
         * wonder how stack overflow exceptions even happen, given
         * that we don't switch stacks for exception processing...)
         */
        memcpy(&exception_frame->context, context, sizeof(CONTEXT));
        context->Eax = context->Eip;
        context->Eip = (unsigned long)&sigtrap_trampoline;

        /* and return */
        return ExceptionContinueExecution;
    }
}

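/*
 * To summarize the round trip described above: a breakpoint raises
 * EXCEPTION_BREAKPOINT, handle_exception() hands it to
 * sigtrap_emulator(), which stashes the interrupted CONTEXT in our
 * exception frame, saves the return Eip in Eax and points Eip at
 * sigtrap_trampoline. The trampoline calls sigtrap_wrapper() below,
 * which copies the saved context into a local copy on its own stack,
 * runs the real sigtrap_handler() on it, and copies the result back
 * into the exception frame. The trampoline then executes a breakpoint
 * with code trap_ContextRestore, which lands us back in the first
 * branch above so the (possibly modified) context can be restored and
 * execution resumed.
 */
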
void sigtrap_wrapper(void)
{
    /*
     * This is the wrapper around the sigtrap handler called from
     * the trampoline returned to from the function above.
     *
     * There actually is a point to some of the commented-out code
     * in this function, although it really belongs to the callback
     * wrappers. Once it is installed there, it can probably be
     * removed from here.
     */

    extern void sigtrap_handler(int signal, siginfo_t *info, void *context);

    /* volatile struct { */
    /*     void *handler[2]; */
    CONTEXT context;
    /* } handler; */

    struct lisp_exception_frame *frame = find_our_seh_frame();

    /* wos_install_interrupt_handlers(handler); */
    /* handler.handler[0] = get_seh_frame(); */
    /* handler.handler[1] = &handle_exception; */
    /* set_seh_frame(&handler); */

    memcpy(&context, &frame->context, sizeof(CONTEXT));

    sigtrap_handler(0, NULL, &context);

    memcpy(&frame->context, &context, sizeof(CONTEXT));

    /* set_seh_frame(handler.handler[0]); */
}

EXCEPTION_DISPOSITION handle_exception(EXCEPTION_RECORD *exception_record,
                                       struct lisp_exception_frame *exception_frame,
                                       CONTEXT *context,
                                       void *dc) /* FIXME: What's dc again? */
{
    /* For EXCEPTION_ACCESS_VIOLATION only. */
    void *fault_address = (void *)exception_record->ExceptionInformation[1];

    if (exception_record->ExceptionCode == EXCEPTION_BREAKPOINT) {
        /* Pick off sigtrap case first. */
        return sigtrap_emulator(context, exception_frame);

    } else if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
               (is_valid_lisp_addr(fault_address) ||
                /* the linkage table does not contain valid lisp
                 * objects, but is also committed on-demand here */
                in_range_p(fault_address, LINKAGE_TABLE_SPACE_START,
                           LINKAGE_TABLE_SPACE_END))) {
        /* Pick off GC-related memory fault next. */
        MEMORY_BASIC_INFORMATION mem_info;

        if (!VirtualQuery(fault_address, &mem_info, sizeof mem_info)) {
            fprintf(stderr, "VirtualQuery: 0x%lx.\n", GetLastError());
            lose("handle_exception: VirtualQuery failure");
        }

        if (mem_info.State == MEM_RESERVE) {
            /* First use of a new page; commit some memory for it. */
            if (!VirtualAlloc(mem_info.BaseAddress, os_vm_page_size,
                              MEM_COMMIT, PAGE_EXECUTE_READWRITE)) {
                fprintf(stderr, "VirtualAlloc: 0x%lx.\n", GetLastError());
                lose("handle_exception: VirtualAlloc failure");
            }
        } else {
            /*
             * Now, if the page is supposedly write-protected and this
             * is a write, tell the gc that it's been hit.
             *
             * FIXME: Are we supposed to fall through to the Lisp
             * exception handler if the gc doesn't take the wp violation?
             */
            if (exception_record->ExceptionInformation[0]) {
                int index = find_page_index(fault_address);
                if ((index != -1) && (page_table[index].write_protected)) {
                    gencgc_handle_wp_violation(fault_address);
                }
            }
        }

        return ExceptionContinueExecution;

    } else if (gencgc_handle_wp_violation(fault_address)) {
        /* gc accepts the wp violation, so resume where we left off. */
        return ExceptionContinueExecution;
    }

    /* All else failed, drop through to the lisp-side exception handler. */

    /*
     * If we fall through to here then we need to either forward
     * the exception to the lisp-side exception handler if it's
     * set up, or drop to LDB.
     */
    if (internal_errors_enabled) {
        /* exception_trampoline is defined in x86-assem.S. */
        extern void exception_trampoline;

        /*
         * We're making the somewhat arbitrary decision that
         * having internal errors enabled means that lisp has
         * sufficient marbles to be able to handle exceptions.
         *
         * Exceptions aren't supposed to happen during cold
         * init or reinit anyway.
         */

        /*
         * We use the same mechanism as the sigtrap emulator above
         * with just a couple changes. We obviously use a different
         * trampoline and wrapper function, we kill out any live
         * floating point exceptions, and we save off the exception
         * record as well as the context.
         */

        /* Save off context and exception information */
        memcpy(&exception_frame->context, context, sizeof(CONTEXT));
        memcpy(&exception_frame->exception, exception_record, sizeof(EXCEPTION_RECORD));

        /* Set up to activate trampoline when we return */
        context->Eax = context->Eip;
        context->Eip = (unsigned long)&exception_trampoline;

        /* Make sure a floating-point trap doesn't kill us */
        context->FloatSave.StatusWord &= ~0x3f;

        /* And return */
        return ExceptionContinueExecution;
    }

    fprintf(stderr, "Exception Code: 0x%lx.\n", exception_record->ExceptionCode);
    fprintf(stderr, "Faulting IP: 0x%lx.\n", (DWORD)exception_record->ExceptionAddress);
    if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
        MEMORY_BASIC_INFORMATION mem_info;

        if (VirtualQuery(fault_address, &mem_info, sizeof mem_info)) {
            fprintf(stderr, "page status: 0x%lx.\n", mem_info.State);
        }

        fprintf(stderr, "Was writing: %ld, where: 0x%lx.\n",
                exception_record->ExceptionInformation[0],
                (DWORD)fault_address);
    }

    fflush(stderr);

    fake_foreign_function_call(context);
    monitor_or_something();

    return ExceptionContinueSearch;
}

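/*
 * Dispatch summary for handle_exception(): breakpoints go through the
 * sigtrap emulation above; access violations inside the lisp spaces
 * or the linkage table are handled by committing the faulting page on
 * demand or by reporting a write-protection hit to the gc; everything
 * else is forwarded to the lisp side via exception_trampoline (and
 * handle_win32_exception_wrapper() below) when internal errors are
 * enabled, and otherwise dumps what we know and drops into the
 * monitor.
 */
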
void handle_win32_exception_wrapper(void)
{
    struct lisp_exception_frame *frame = find_our_seh_frame();
    CONTEXT context;
    EXCEPTION_RECORD exception_record;
    lispobj context_sap;
    lispobj exception_record_sap;

    memcpy(&context, &frame->context, sizeof(CONTEXT));
    memcpy(&exception_record, &frame->exception, sizeof(EXCEPTION_RECORD));

    fake_foreign_function_call(&context);

    /* Allocate the SAP objects while the "interrupts" are still
     * disabled. */
    context_sap = alloc_sap(&context);
    exception_record_sap = alloc_sap(&exception_record);

    funcall2(SymbolFunction(HANDLE_WIN32_EXCEPTION), context_sap,
             exception_record_sap);

    undo_fake_foreign_function_call(&context);

    memcpy(&frame->context, &context, sizeof(CONTEXT));
}

void
wos_install_interrupt_handlers(struct lisp_exception_frame *handler)
{
    handler->next_frame = get_seh_frame();
    handler->handler = &handle_exception;
    set_seh_frame(handler);
}

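/*
 * A minimal usage sketch (hypothetical caller; the real call sites
 * are elsewhere in the runtime): the frame must live on the stack of
 * the thread it protects, since it becomes part of that thread's
 * fs:[0] SEH chain, e.g.
 *
 *     struct lisp_exception_frame exception_frame;
 *     wos_install_interrupt_handlers(&exception_frame);
 */
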
void bcopy(const void *src, void *dest, size_t n)
{
    MoveMemory(dest, src, n);
}

/*
 * The stubs below are replacements for the windows versions,
 * which can -fail- when used in our memory spaces because they
 * validate the memory spaces they are passed in a way that
 * denies our exception handler a chance to run.
 */
void *memmove(void *dest, const void *src, size_t n)
{
    if (dest < src) {
        int i;
        for (i = 0; i < n; i++) *(((char *)dest)+i) = *(((char *)src)+i);
    } else {
        while (n--) *(((char *)dest)+n) = *(((char *)src)+n);
    }
    return dest;
}

void *memcpy(void *dest, const void *src, size_t n)
{
    while (n--) *(((char *)dest)+n) = *(((char *)src)+n);
    return dest;
}

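/*
 * Design note: memcpy() may assume non-overlapping buffers, so a
 * single descending byte loop suffices; memmove() chooses the copy
 * direction so that overlapping regions are handled correctly.
 * Crucially, neither stub probes the buffers before copying, so the
 * first access to an uncommitted page simply faults into
 * handle_exception(), which commits it lazily.
 */
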
char *dirname(char *path)
{
    static char buf[PATH_MAX + 1];
    size_t pathlen = strlen(path);
    int i;

    if (pathlen >= sizeof(buf)) {
        lose("Pathname too long in dirname.\n");
        return NULL;
    }

    strcpy(buf, path);
    for (i = pathlen; i >= 0; --i) {
        if (buf[i] == '/' || buf[i] == '\\') {
            buf[i] = '\0';
            break;
        }
    }

    return buf;
}

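/*
 * Example: dirname("c:\\foo\\bar.lisp") yields "c:\\foo", and a path
 * containing no separator comes back unchanged (as a copy). The
 * result points into a static buffer, so it is overwritten by the
 * next call and is not thread-safe.
 */
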
/* This is a manually-maintained version of ldso_stubs.S. */

void scratch(void)
{
    strerror(42);
    asin(0);
    acos(0);
    sinh(0);
    cosh(0);
    hypot(0, 0);
    write(0, 0, 0);
    close(0);
#ifndef LISP_FEATURE_SB_UNICODE
    MoveFileA(0,0);
#else
    MoveFileW(0,0);
#endif
#ifndef LISP_FEATURE_SB_UNICODE
    GetCurrentDirectoryA(0,0);
#else
    GetCurrentDirectoryW(0,0);
#endif
    dup(0);
    LoadLibrary(0);
    GetProcAddress(0, 0);
    FreeLibrary(0);
#ifndef LISP_FEATURE_SB_UNICODE
    CreateDirectoryA(0,0);
#else
    CreateDirectoryW(0,0);
#endif
    _pipe(0,0,0);
    isatty(0);
    access(0,0);
    GetLastError();
    FormatMessageA(0, 0, 0, 0, 0, 0, 0);
#ifdef LISP_FEATURE_SB_UNICODE
    FormatMessageW(0, 0, 0, 0, 0, 0, 0);
#endif
    _get_osfhandle(0);
    ReadFile(0, 0, 0, 0, 0);
    WriteFile(0, 0, 0, 0, 0);
    PeekNamedPipe(0, 0, 0, 0, 0, 0);
    FlushConsoleInputBuffer(0);
    PeekConsoleInput(0, 0, 0, 0);
    Sleep(0);
#ifndef LISP_FEATURE_SB_UNICODE
    SHGetFolderPathA(0, 0, 0, 0, 0);
#else
    SHGetFolderPathW(0, 0, 0, 0, 0);
#endif
    GetACP();
    GetOEMCP();
    LocalFree(0);
#ifndef LISP_FEATURE_SB_UNICODE
    GetEnvironmentVariableA(0, 0, 0);
#else
    GetEnvironmentVariableW(0, 0, 0);
#endif
    GetConsoleCP();
    GetConsoleOutputCP();
    GetExitCodeProcess(0, 0);
}

char *
os_get_runtime_executable_path()
{
    char path[MAX_PATH + 1];
    DWORD bufsize = sizeof(path);
    DWORD size;

    if ((size = GetModuleFileNameA(NULL, path, bufsize)) == 0)
        return NULL;
    else if (size == bufsize && GetLastError() == ERROR_INSUFFICIENT_BUFFER)
        return NULL;

    return copied_string(path);
}

/* EOF */