/*-
 * Copyright (c) 2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.43.2.5 2005/03/31 04:24:36 wpaul Exp $");
#endif
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: subr_ntoskrnl.c,v 1.17 2009/03/18 16:00:17 cegger Exp $");
#endif

#ifdef __FreeBSD__
#include <sys/ctype.h>
#endif
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#ifdef __FreeBSD__
#include <sys/mutex.h>
#endif

#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kdb.h>
#endif
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/atomic.h>
#ifdef __FreeBSD__
#include <machine/clock.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#endif
#include <sys/bus.h>
#include <machine/stdarg.h>

#ifdef __FreeBSD__
#include <sys/bus.h>
#include <sys/rman.h>
#endif

#ifdef __NetBSD__
#include <uvm/uvm.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pmap.h>
#include <sys/pool.h>
#include <sys/reboot.h>		/* for AB_VERBOSE */
#else
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#endif

#include <compat/ndis/pe_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ndis_var.h>
#ifdef __NetBSD__
#include <compat/ndis/nbcompat.h>
#endif

#define __regparm __attribute__((regparm(3)))

#ifdef __NetBSD__
/* Turn on DbgPrint() from Windows Driver */
#define boothowto AB_VERBOSE
#endif

__stdcall static uint8_t RtlEqualUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static void RtlCopyUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *);
__stdcall static ndis_status RtlUnicodeStringToAnsiString(ndis_ansi_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static ndis_status RtlAnsiStringToUnicodeString(ndis_unicode_string *,
	ndis_ansi_string *, uint8_t);
__stdcall static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
__stdcall static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
__stdcall static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
__stdcall static irp *IoAllocateIrp(uint8_t, uint8_t);
__stdcall static void IoReuseIrp(irp *, uint32_t);
__stdcall static void IoFreeIrp(irp *);
__stdcall static void IoInitializeIrp(irp *, uint16_t, uint8_t);
__stdcall static irp *IoMakeAssociatedIrp(irp *, uint8_t);
__stdcall static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_wakeup(void *);
static void ntoskrnl_timercall(void *);
static void ntoskrnl_run_dpc(void *);
__stdcall static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
__stdcall static uint16_t READ_REGISTER_USHORT(uint16_t *);
__stdcall static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
__stdcall static uint32_t READ_REGISTER_ULONG(uint32_t *);
__stdcall static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
__stdcall static uint8_t READ_REGISTER_UCHAR(uint8_t *);
__stdcall static int64_t _allmul(int64_t, int64_t);
__stdcall static int64_t _alldiv(int64_t, int64_t);
__stdcall static int64_t _allrem(int64_t, int64_t);
__regparm static int64_t _allshr(int64_t, uint8_t);
__regparm static int64_t _allshl(int64_t, uint8_t);
__stdcall static uint64_t _aullmul(uint64_t, uint64_t);
__stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
__stdcall static uint64_t _aullrem(uint64_t, uint64_t);
__regparm static uint64_t _aullshr(uint64_t, uint8_t);
__regparm static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
__stdcall static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeletePagedLookasideList(paged_lookaside_list *);
__stdcall static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
__fastcall static slist_entry
	*InterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry));
__fastcall static slist_entry *InterlockedPopEntrySList(REGARGS1(slist_header
	*head));
__fastcall static slist_entry
	*ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry), kspin_lock *lock);
__fastcall static slist_entry
	*ExInterlockedPopEntrySList(REGARGS2(slist_header *head,
	kspin_lock *lock));
__stdcall static uint16_t ExQueryDepthSList(slist_header *);
__fastcall static uint32_t
	InterlockedIncrement(REGARGS1(volatile uint32_t *addend));
__fastcall static uint32_t
	InterlockedDecrement(REGARGS1(volatile uint32_t *addend));
__fastcall static void
	ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t));
__stdcall static uint32_t MmSizeOfMdl(void *, size_t);
__stdcall static void MmBuildMdlForNonPagedPool(mdl *);
__stdcall static void *MmMapLockedPages(mdl *, uint8_t);
__stdcall static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
__stdcall static void MmUnmapLockedPages(void *, mdl *);
__stdcall static size_t RtlCompareMemory(const void *, const void *, size_t);
__stdcall static void RtlInitAnsiString(ndis_ansi_string *, char *);
__stdcall static void RtlInitUnicodeString(ndis_unicode_string *,
	uint16_t *);
__stdcall static void RtlFreeUnicodeString(ndis_unicode_string *);
__stdcall static void RtlFreeAnsiString(ndis_ansi_string *);
__stdcall static ndis_status RtlUnicodeStringToInteger(ndis_unicode_string *,
	uint32_t, uint32_t *);
static int atoi(const char *);
static long atol(const char *);
static int rand(void);
static void srand(unsigned int);
static void ntoskrnl_time(uint64_t *);
__stdcall static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
__stdcall static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
__stdcall static ndis_status PsTerminateSystemThread(ndis_status);
__stdcall static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
__stdcall static void KeInitializeMutex(kmutant *, uint32_t);
__stdcall static uint32_t KeReleaseMutex(kmutant *, uint8_t);
__stdcall static uint32_t KeReadStateMutex(kmutant *);
__stdcall static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
__fastcall static void ObfDereferenceObject(REGARGS1(void *object));
__stdcall static uint32_t ZwClose(ndis_handle);
static void *ntoskrnl_memset(void *, int, size_t);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
__stdcall static void DbgBreakPoint(void);
__stdcall static void dummy(void);

#ifdef __FreeBSD__
static struct mtx ntoskrnl_dispatchlock;
#else /* __NetBSD__ */
static struct simplelock ntoskrnl_dispatchlock;
#define DISPATCH_LOCK()		do {s = splnet(); simple_lock(&ntoskrnl_dispatchlock);} while(0)
#define DISPATCH_UNLOCK()	do {simple_unlock(&ntoskrnl_dispatchlock); splx(s);} while(0)
#endif

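/*
 * Note: DISPATCH_LOCK()/DISPATCH_UNLOCK() expand references to a
 * variable "s", so every NetBSD function that uses them must declare
 * a local "int s;" (see the #ifdef __NetBSD__ declarations below) to
 * hold the saved spl level across the critical section.
 */
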
static kspin_lock ntoskrnl_global;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
#ifdef __FreeBSD__
static uma_zone_t mdl_zone;
#else
static struct pool mdl_pool;
#endif

int
ntoskrnl_libinit(void)
{
	image_patch_table	*patch;

#ifdef __FreeBSD__
	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF);
#else /* __NetBSD__ */
	simple_lock_init(&ntoskrnl_dispatchlock);
#endif
	KeInitializeSpinLock(&ntoskrnl_global);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	TAILQ_INIT(&ntoskrnl_reflist);

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap);
		patch++;
	}

	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
	 */

#ifdef __FreeBSD__
	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
#else
	pool_init(&mdl_pool, MDL_ZONE_SIZE, 0, 0, 0, "winmdl", NULL,
	    IPL_VM);
#endif

	return(0);
}

int
ntoskrnl_libfini(void)
{
	image_patch_table	*patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

#ifdef __FreeBSD__
	uma_zdestroy(mdl_zone);
	mtx_destroy(&ntoskrnl_dispatchlock);
#else
	pool_destroy(&mdl_pool);
	/* XXX destroy lock */
#endif

	return(0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(void *buf, int ch, size_t size)
{
	return(memset(buf, ch, size));
}

__stdcall static uint8_t
RtlEqualUnicodeString(ndis_unicode_string *str1, ndis_unicode_string *str2,
    uint8_t caseinsensitive)
{
	int			i;

	if (str1->us_len != str2->us_len)
		return(FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return(FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return(FALSE);
		}
	}

	return(TRUE);
}

__stdcall static void
RtlCopyUnicodeString(ndis_unicode_string *dest, ndis_unicode_string *src)
{
	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
	return;
}

__stdcall static ndis_status
RtlUnicodeStringToAnsiString(ndis_ansi_string *dest, ndis_unicode_string *src,
    uint8_t allocate)
{
	char			*astr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_unicode_to_ascii(src->us_buf, src->us_len, &astr))
			return(NDIS_STATUS_FAILURE);
		dest->nas_buf = astr;
		dest->nas_len = dest->nas_maxlen = strlen(astr);
	} else {
		dest->nas_len = src->us_len / 2; /* XXX */
		if (dest->nas_maxlen < dest->nas_len)
			dest->nas_len = dest->nas_maxlen;
		ndis_unicode_to_ascii(src->us_buf, dest->nas_len * 2,
		    &dest->nas_buf);
	}
	return (NDIS_STATUS_SUCCESS);
}

__stdcall static ndis_status
RtlAnsiStringToUnicodeString(ndis_unicode_string *dest, ndis_ansi_string *src,
    uint8_t allocate)
{
	uint16_t		*ustr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
			return(NDIS_STATUS_FAILURE);
		dest->us_buf = ustr;
		dest->us_len = dest->us_maxlen = strlen(src->nas_buf) * 2;
	} else {
		dest->us_len = src->nas_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
		ndis_ascii_to_unicode(src->nas_buf, &dest->us_buf);
	}
	return (NDIS_STATUS_SUCCESS);
}

__stdcall void *
ExAllocatePoolWithTag(
	uint32_t pooltype,
	size_t len,
	uint32_t tag)
{
	void			*buf;

	/* The pool type and tag are ignored; everything comes from M_DEVBUF. */
	buf = malloc(len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return(NULL);
	return(buf);
}

__stdcall void
ExFreePool(void *buf)
{
	free(buf, M_DEVBUF);
	return;
}

__stdcall uint32_t
IoAllocateDriverObjectExtension(driver_object *drv, void *clid,
    uint32_t extlen, void **ext)
{
	custom_extension	*ce;

	ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
	    + extlen, 0);

	if (ce == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	ce->ce_clid = clid;
	INSERT_LIST_TAIL((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

	*ext = (void *)(ce + 1);

	return(STATUS_SUCCESS);
}

__stdcall void *
IoGetDriverObjectExtension(driver_object *drv, void *clid)
{
	list_entry		*e;
	custom_extension	*ce;

	printf("in IoGetDriverObjectExtension\n");

	e = drv->dro_driverext->dre_usrext.nle_flink;
	while (e != &drv->dro_driverext->dre_usrext) {
		ce = (custom_extension *)e;
		/*
		 * Braces are required here: without them the debug
		 * printf() becomes the if body and the return fires
		 * unconditionally on the first list entry.
		 */
		if (ce->ce_clid == clid) {
			printf("found\n");
			return((void *)(ce + 1));
		}
		e = e->nle_flink;
	}
	printf("not found\n");
	return(NULL);
}

__stdcall uint32_t
IoCreateDevice(
	driver_object *drv,
	uint32_t devextlen,
	unicode_string *devname,
	uint32_t devtype,
	uint32_t devchars,
	uint8_t exclusive,
	device_object **newdev)
{
	device_object		*dev;

#ifdef NDIS_LKM
	printf("In IoCreateDevice: drv = %x, devextlen = %x\n", drv, devextlen);
#endif

	dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
#ifdef NDIS_LKM
	printf("dev = %x\n", dev);
#endif
	if (dev == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	dev->do_type = devtype;
	dev->do_drvobj = drv;
	dev->do_currirp = NULL;
	dev->do_flags = 0;

	if (devextlen) {
		dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
		    devextlen, 0);

		if (dev->do_devext == NULL) {
			ExFreePool(dev);
			return(STATUS_INSUFFICIENT_RESOURCES);
		}

		memset(dev->do_devext, 0, devextlen);
	} else
		dev->do_devext = NULL;

	dev->do_size = sizeof(device_object) + devextlen;
	dev->do_refcnt = 1;
	dev->do_attacheddev = NULL;
	dev->do_nextdev = NULL;
	dev->do_devtype = devtype;
	dev->do_stacksize = 1;
	dev->do_alignreq = 1;
	dev->do_characteristics = devchars;
	dev->do_iotimer = NULL;
	KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

	/*
	 * Vpd is used for disk/tape devices,
	 * but we don't support those. (Yet.)
	 */
	dev->do_vpb = NULL;

	dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(devobj_extension), 0);

	if (dev->do_devobj_ext == NULL) {
		if (dev->do_devext != NULL)
			ExFreePool(dev->do_devext);
		ExFreePool(dev);
		return(STATUS_INSUFFICIENT_RESOURCES);
	}

	dev->do_devobj_ext->dve_type = 0;
	dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
	dev->do_devobj_ext->dve_devobj = dev;

	/*
	 * Attach this device to the driver object's list
	 * of devices. Note: this is not the same as attaching
	 * the device to the device stack. The driver's AddDevice
	 * routine must explicitly call IoAttachDeviceToDeviceStack()
	 * to do that.
	 */

	if (drv->dro_devobj == NULL) {
		drv->dro_devobj = dev;
		dev->do_nextdev = NULL;
	} else {
		dev->do_nextdev = drv->dro_devobj;
		drv->dro_devobj = dev;
	}

	*newdev = dev;

	return(STATUS_SUCCESS);
}

__stdcall void
IoDeleteDevice(device_object *dev)
{
	device_object		*prev;

	if (dev == NULL)
		return;

	if (dev->do_devobj_ext != NULL)
		ExFreePool(dev->do_devobj_ext);

	if (dev->do_devext != NULL)
		ExFreePool(dev->do_devext);

	/* Unlink the device from the driver's device list. */

	prev = dev->do_drvobj->dro_devobj;
	if (prev == dev)
		dev->do_drvobj->dro_devobj = dev->do_nextdev;
	else {
		while (prev->do_nextdev != dev)
			prev = prev->do_nextdev;
		prev->do_nextdev = dev->do_nextdev;
	}

	ExFreePool(dev);

	return;
}

__stdcall device_object *
IoGetAttachedDevice(device_object *dev)
{
	device_object		*d;

	if (dev == NULL)
		return (NULL);

	d = dev;

	while (d->do_attacheddev != NULL)
		d = d->do_attacheddev;

	return (d);
}

__stdcall static irp *
IoBuildSynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
    uint32_t len, uint64_t *off, nt_kevent *event, io_status_block *status)
{
	irp			*ip;

	ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;

	return(ip);
}

__stdcall static irp *
IoBuildAsynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
    uint32_t len, uint64_t *off, io_status_block *status)
{
	irp			*ip;
	io_stack_location	*sl;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);

	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = func;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;

	ip->irp_userbuf = buf;

	if (dobj->do_flags & DO_BUFFERED_IO) {
		ip->irp_assoc.irp_sysbuf =
		    ExAllocatePoolWithTag(NonPagedPool, len, 0);
		if (ip->irp_assoc.irp_sysbuf == NULL) {
			IoFreeIrp(ip);
			return(NULL);
		}
		memcpy(ip->irp_assoc.irp_sysbuf, buf, len);
	}

	if (dobj->do_flags & DO_DIRECT_IO) {
		ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
		if (ip->irp_mdl == NULL) {
			if (ip->irp_assoc.irp_sysbuf != NULL)
				ExFreePool(ip->irp_assoc.irp_sysbuf);
			IoFreeIrp(ip);
			return(NULL);
		}
		ip->irp_userbuf = NULL;
		ip->irp_assoc.irp_sysbuf = NULL;
	}

	if (func == IRP_MJ_READ) {
		sl->isl_parameters.isl_read.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_read.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_read.isl_byteoff = 0;
	}

	if (func == IRP_MJ_WRITE) {
		sl->isl_parameters.isl_write.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_write.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_write.isl_byteoff = 0;
	}

	return(ip);
}

__stdcall static irp *
IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj,
    void *ibuf, uint32_t ilen, void *obuf, uint32_t olen,
    uint8_t isinternal, nt_kevent *event, io_status_block *status)
{
	irp			*ip;
	io_stack_location	*sl;
	uint32_t		buflen;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;
	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = isinternal == TRUE ?
	    IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;
	sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
	sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
	sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

	switch(IO_METHOD(iocode)) {
	case METHOD_BUFFERED:
		if (ilen > olen)
			buflen = ilen;
		else
			buflen = olen;
		if (buflen) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
		}
		if (ilen && ibuf != NULL) {
			memcpy(ip->irp_assoc.irp_sysbuf, ibuf, ilen);
			memset((char *)ip->irp_assoc.irp_sysbuf + ilen, 0,
			    buflen - ilen);
		} else
			memset(ip->irp_assoc.irp_sysbuf, 0, ilen);
		ip->irp_userbuf = obuf;
		break;
	case METHOD_IN_DIRECT:
	case METHOD_OUT_DIRECT:
		if (ilen && ibuf != NULL) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
			memcpy(ip->irp_assoc.irp_sysbuf, ibuf, ilen);
		}
		if (olen && obuf != NULL) {
			ip->irp_mdl = IoAllocateMdl(obuf, olen,
			    FALSE, FALSE, ip);
			/*
			 * Normally we would MmProbeAndLockPages()
			 * here, but we don't have to in our
			 * implementation.
			 */
		}
		break;
	case METHOD_NEITHER:
		ip->irp_userbuf = obuf;
		sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
		break;
	default:
		break;
	}

	/*
	 * Ideally, we should associate this IRP with the calling
	 * thread here.
	 */

	return (ip);
}

__stdcall static irp *
IoAllocateIrp(
	uint8_t stsize,
	uint8_t chargequota)
{
	irp			*i;

	i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
	if (i == NULL)
		return (NULL);

	IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

	return (i);
}

__stdcall static irp *
IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
{
	irp			*associrp;
#ifdef __NetBSD__
	int			s;
#endif

	associrp = IoAllocateIrp(stsize, FALSE);
	if (associrp == NULL)
		return(NULL);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	associrp->irp_flags |= IRP_ASSOCIATED_IRP;
	associrp->irp_tail.irp_overlay.irp_thread =
	    ip->irp_tail.irp_overlay.irp_thread;
	associrp->irp_assoc.irp_master = ip;

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(associrp);
}

__stdcall static void
IoFreeIrp(irp *ip)
{
	ExFreePool(ip);
	return;
}

__stdcall static void
IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
{
	memset((char *)io, 0, IoSizeOfIrp(ssize));
	io->irp_size = psize;
	io->irp_stackcnt = ssize;
	io->irp_currentstackloc = ssize;
	INIT_LIST_HEAD(&io->irp_thlist);
	io->irp_tail.irp_overlay.irp_csl =
	    (io_stack_location *)(io + 1) + ssize;

	return;
}

__stdcall static void
IoReuseIrp(irp *ip, uint32_t status)
{
	uint8_t			allocflags;

	allocflags = ip->irp_allocflags;
	IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
	ip->irp_iostat.isb_status = status;
	ip->irp_allocflags = allocflags;

	return;
}

__stdcall void
IoAcquireCancelSpinLock(uint8_t *irql)
{
	KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

__stdcall void
IoReleaseCancelSpinLock(uint8_t irql)
{
	KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

__stdcall uint8_t
IoCancelIrp(irp *ip)
{
	cancel_func		cfunc;

	IoAcquireCancelSpinLock(&ip->irp_cancelirql);
	cfunc = IoSetCancelRoutine(ip, NULL);
	ip->irp_cancel = TRUE;
	/*
	 * Test the routine we swapped out, not ip->irp_cancelfunc:
	 * the latter is always NULL after IoSetCancelRoutine() above,
	 * which would make us bail out unconditionally.
	 */
	if (cfunc == NULL) {
		IoReleaseCancelSpinLock(ip->irp_cancelirql);
		return(FALSE);
	}
	MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
	return(TRUE);
}

__fastcall uint32_t
IofCallDriver(REGARGS2(device_object *dobj, irp *ip))
{
	driver_object		*drvobj;
	io_stack_location	*sl;
	uint32_t		status;
	driver_dispatch		disp;

	drvobj = dobj->do_drvobj;

	if (ip->irp_currentstackloc <= 0)
		panic("IoCallDriver(): out of stack locations");

	IoSetNextIrpStackLocation(ip);
	sl = IoGetCurrentIrpStackLocation(ip);

	sl->isl_devobj = dobj;

	disp = drvobj->dro_dispatch[sl->isl_major];
	status = MSCALL2(disp, dobj, ip);

	return(status);
}

__fastcall void
IofCompleteRequest(REGARGS2(irp *ip, uint8_t prioboost))
{
	uint32_t		i;
	uint32_t		status;
	device_object		*dobj;
	io_stack_location	*sl;
	completion_func		cf;

	ip->irp_pendingreturned =
	    IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
	sl = (io_stack_location *)(ip + 1);

	for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
		if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
			IoSkipCurrentIrpStackLocation(ip);
			dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
		} else
			dobj = NULL;

		if (sl[i].isl_completionfunc != NULL &&
		    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
		    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
		    (ip->irp_cancel == TRUE &&
		    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
			cf = sl->isl_completionfunc;
			status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
			if (status == STATUS_MORE_PROCESSING_REQUIRED)
				return;
		}

		if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
		    SL_PENDING_RETURNED)
			ip->irp_pendingreturned = TRUE;
	}

	/* Handle any associated IRPs. */

	if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
		uint32_t		masterirpcnt;
		irp			*masterirp;
		mdl			*m;

		masterirp = ip->irp_assoc.irp_master;
		masterirpcnt = FASTCALL1(InterlockedDecrement,
		    &masterirp->irp_assoc.irp_irpcnt);

		while ((m = ip->irp_mdl) != NULL) {
			ip->irp_mdl = m->mdl_next;
			IoFreeMdl(m);
		}
		IoFreeIrp(ip);
		if (masterirpcnt == 0)
			IoCompleteRequest(masterirp, IO_NO_INCREMENT);
		return;
	}

	/* With any luck, these conditions will never arise. */

	if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
		if (ip->irp_usriostat != NULL)
			*ip->irp_usriostat = ip->irp_iostat;
		if (ip->irp_usrevent != NULL)
			KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
		if (ip->irp_flags & IRP_PAGING_IO) {
			if (ip->irp_mdl != NULL)
				IoFreeMdl(ip->irp_mdl);
			IoFreeIrp(ip);
		}
	}

	return;
}

__stdcall device_object *
IoAttachDeviceToDeviceStack(device_object *src, device_object *dst)
{
	device_object		*attached;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	src->do_stacksize = attached->do_stacksize + 1;

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(attached);
}

__stdcall void
IoDetachDevice(device_object *topdev)
{
	device_object		*tail;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	/* First, break the chain. */

	tail = topdev->do_attacheddev;
	if (tail == NULL) {
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return;
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_wakeup(void *arg)
{
	nt_dispatch_header	*obj;
	wait_block		*w;
	list_entry		*e;
#ifdef __FreeBSD__
	struct thread		*td;
#endif

	obj = arg;

	obj->dh_sigstate = TRUE;
	e = obj->dh_waitlisthead.nle_flink;
	while (e != &obj->dh_waitlisthead) {
		w = (wait_block *)e;
		/* TODO: is this correct? */
#ifdef __FreeBSD__
		td = w->wb_kthread;
		ndis_thresume(td->td_proc);
#else
		ndis_thresume(curproc);
#endif
		/*
		 * For synchronization objects, only wake up
		 * the first waiter.
		 */
		if (obj->dh_type == EVENT_TYPE_SYNC)
			break;
		e = e->nle_flink;
	}

	return;
}

static void
ntoskrnl_time(uint64_t *tval)
{
	struct timespec		ts;
#ifdef __NetBSD__
	struct timeval		tv;
	microtime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv,&ts);
#else
	nanotime(&ts);
#endif

	/*
	 * Produce a Windows-style timestamp: 100ns units since
	 * January 1, 1601. The UNIX epoch is 11644473600 seconds
	 * later, so that offset has to be scaled into 100ns units
	 * as well before it is added.
	 */
	*tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
	    (uint64_t)11644473600ULL * 10000000;

	return;
}

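/*
 * Worked example of the conversion above (illustrative values):
 * for tv_sec = 1230768000 (2009-01-01 00:00:00 UTC) and tv_nsec = 500,
 *
 *	500 / 100                =                  5
 *	1230768000 * 10000000    =  12307680000000000
 *	11644473600 * 10000000   = 116444736000000000
 *	                   total = 128752416000000005
 *
 * which is the corresponding Windows FILETIME value.
 */
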
/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */

__stdcall uint32_t
KeWaitForSingleObject(
	nt_dispatch_header *obj,
	uint32_t reason,
	uint32_t mode,
	uint8_t alertable,
	int64_t *duetime)
{
#ifdef __FreeBSD__
	struct thread		*td = curthread;
#endif
	kmutant			*km;
	wait_block		w;
	struct timeval		tv;
	int			error = 0;
	uint64_t		curtime;
#ifdef __NetBSD__
	int			s;
#endif

	if (obj == NULL)
		return(STATUS_INVALID_PARAMETER);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	/*
	 * See if the object is a mutex. If so, and we already own
	 * it, then just increment the acquisition count and return.
	 *
	 * For any other kind of object, see if it's already in the
	 * signalled state, and if it is, just return. If the object
	 * is marked as a synchronization event, reset the state to
	 * unsignalled.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL ||
#ifdef __FreeBSD__
		    km->km_ownerthread == curthread->td_proc) {
#else
		    km->km_ownerthread == curproc) {
#endif
			obj->dh_sigstate = FALSE;
			km->km_acquirecnt++;
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif

#ifdef __FreeBSD__
			mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
			DISPATCH_UNLOCK();
#endif
			return (STATUS_SUCCESS);
		}
	} else if (obj->dh_sigstate == TRUE) {
		if (obj->dh_type == EVENT_TYPE_SYNC)
			obj->dh_sigstate = FALSE;

#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return (STATUS_SUCCESS);
	}

	w.wb_object = obj;
#ifdef __FreeBSD__
	w.wb_kthread = td;
#endif

	INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));

	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to an absolute offset relative to now in order to use it.
	 * If it's negative, then the duetime is relative and we
	 * just have to convert the units.
	 */

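	/*
	 * For instance (hypothetical values): a relative duetime of
	 * -50000000 means "wait 5 seconds":
	 *	tv.tv_sec  = 50000000 / 10000000         = 5
	 *	tv.tv_usec = 50000000 / 10 - 5 * 1000000 = 0
	 * An absolute (positive) duetime is first diffed against
	 * ntoskrnl_time() and then converted the same way.
	 */
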
	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

#ifdef __FreeBSD__
	error = ndis_thsuspend(td->td_proc, &ntoskrnl_dispatchlock,
	    duetime == NULL ? 0 : tvtohz(&tv));
#else
	error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
	    duetime == NULL ? 0 : tvtohz(&tv));
#endif

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		REMOVE_LIST_ENTRY((&w.wb_waitlist));
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return(STATUS_TIMEOUT);
	}

	/*
	 * Mutexes are always synchronization objects, which means
	 * if several threads are waiting to acquire it, only one will
	 * be woken up. If that one is us, and the mutex is up for grabs,
	 * grab it.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL) {
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif
			km->km_acquirecnt++;
		}
	}

	if (obj->dh_type == EVENT_TYPE_SYNC)
		obj->dh_sigstate = FALSE;
	REMOVE_LIST_ENTRY((&w.wb_waitlist));

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(STATUS_SUCCESS);
}

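/*
 * A minimal usage sketch (hypothetical driver code; assumes the
 * EVENT_TYPE_NOTIFY constant and the nt_kevent layout from
 * ntoskrnl_var.h, whose first member is the dispatch header):
 *
 *	nt_kevent	ev;
 *	int64_t		duetime = -10000000;	// relative, 1 second
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	if (KeWaitForSingleObject((nt_dispatch_header *)&ev, 0, 0,
 *	    FALSE, &duetime) == STATUS_TIMEOUT)
 *		// nobody called KeSetEvent() within one second
 */
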
__stdcall static uint32_t
KeWaitForMultipleObjects(
	uint32_t cnt,
	nt_dispatch_header *obj[],
	uint32_t wtype,
	uint32_t reason,
	uint32_t mode,
	uint8_t alertable,
	int64_t *duetime,
	wait_block *wb_array)
{
#ifdef __FreeBSD__
	struct thread		*td = curthread;
#endif
	kmutant			*km;
	wait_block		_wb_array[THREAD_WAIT_OBJECTS];
	wait_block		*w;
	struct timeval		tv;
	int			i, wcnt = 0, widx = 0, error = 0;
	uint64_t		curtime;
	struct timespec		t1, t2;
#ifdef __NetBSD__
	struct timeval		tv1,tv2;
	int			s;
#endif

	if (cnt > MAX_WAIT_OBJECTS)
		return(STATUS_INVALID_PARAMETER);
	if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
		return(STATUS_INVALID_PARAMETER);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	if (wb_array == NULL)
		w = &_wb_array[0];
	else
		w = wb_array;

	/* First pass: see if we can satisfy any waits immediately. */

	for (i = 0; i < cnt; i++) {
		if (obj[i]->dh_size == OTYPE_MUTEX) {
			km = (kmutant *)obj[i];
			if (km->km_ownerthread == NULL ||
#ifdef __FreeBSD__
			    km->km_ownerthread == curthread->td_proc) {
#else
			    km->km_ownerthread == curproc) {
#endif
				obj[i]->dh_sigstate = FALSE;
				km->km_acquirecnt++;
#ifdef __FreeBSD__
				km->km_ownerthread = curthread->td_proc;
#else
				km->km_ownerthread = curproc;
#endif
				if (wtype == WAITTYPE_ANY) {
#ifdef __FreeBSD__
					mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
					DISPATCH_UNLOCK();
#endif
					return (STATUS_WAIT_0 + i);
				}
			}
		} else if (obj[i]->dh_sigstate == TRUE) {
			if (obj[i]->dh_type == EVENT_TYPE_SYNC)
				obj[i]->dh_sigstate = FALSE;
			if (wtype == WAITTYPE_ANY) {
#ifdef __FreeBSD__
				mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
				DISPATCH_UNLOCK();
#endif
				return (STATUS_WAIT_0 + i);
			}
		}
	}

	/*
	 * Second pass: set up wait for anything we can't
	 * satisfy immediately.
	 */

	for (i = 0; i < cnt; i++) {
		if (obj[i]->dh_sigstate == TRUE)
			continue;
		INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
		    (&w[i].wb_waitlist));
#ifdef __FreeBSD__
		w[i].wb_kthread = td;
#endif
		w[i].wb_object = obj[i];
		wcnt++;
	}

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	while (wcnt) {
#ifdef __FreeBSD__
		nanotime(&t1);
#else
		microtime(&tv1);
		TIMEVAL_TO_TIMESPEC(&tv1,&t1);
#endif

#ifdef __FreeBSD__
		error = ndis_thsuspend(td->td_proc, &ntoskrnl_dispatchlock,
		    duetime == NULL ? 0 : tvtohz(&tv));
#else
		error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
		    duetime == NULL ? 0 : tvtohz(&tv));
#endif
#ifdef __FreeBSD__
		nanotime(&t2);
#else
		microtime(&tv2);
		TIMEVAL_TO_TIMESPEC(&tv2,&t2);
#endif

		for (i = 0; i < cnt; i++) {
			if (obj[i]->dh_size == OTYPE_MUTEX) {
				/* Index the array here, not the array pointer. */
				km = (kmutant *)obj[i];
				if (km->km_ownerthread == NULL) {
					km->km_ownerthread =
#ifdef __FreeBSD__
					    curthread->td_proc;
#else
					    curproc;
#endif
					km->km_acquirecnt++;
				}
			}
			if (obj[i]->dh_sigstate == TRUE) {
				widx = i;
				if (obj[i]->dh_type == EVENT_TYPE_SYNC)
					obj[i]->dh_sigstate = FALSE;
				REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
				wcnt--;
			}
		}

		if (error || wtype == WAITTYPE_ANY)
			break;

		if (duetime != NULL) {
			tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
			tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
		}
	}

	if (wcnt) {
		for (i = 0; i < cnt; i++)
			REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
	}

	if (error == EWOULDBLOCK) {
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return(STATUS_TIMEOUT);
	}

	if (wtype == WAITTYPE_ANY && wcnt) {
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return(STATUS_WAIT_0 + widx);
	}

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(STATUS_SUCCESS);
}

__stdcall static void
WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
{
	bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static uint16_t
READ_REGISTER_USHORT(uint16_t *reg)
{
	return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static void
WRITE_REGISTER_ULONG(uint32_t *reg, uint32_t val)
{
	bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static uint32_t
READ_REGISTER_ULONG(uint32_t *reg)
{
	return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static uint8_t
READ_REGISTER_UCHAR(uint8_t *reg)
{
	return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static void
WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
{
	bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static int64_t
_allmul(int64_t a, int64_t b)
{
	return (a * b);
}

__stdcall static int64_t
_alldiv(int64_t a, int64_t b)
{
	return (a / b);
}

__stdcall static int64_t
_allrem(int64_t a, int64_t b)
{
	return (a % b);
}

__stdcall static uint64_t
_aullmul(uint64_t a, uint64_t b)
{
	return (a * b);
}

__stdcall static uint64_t
_aulldiv(uint64_t a, uint64_t b)
{
	return (a / b);
}

__stdcall static uint64_t
_aullrem(uint64_t a, uint64_t b)
{
	return (a % b);
}

__regparm static int64_t
_allshl(int64_t a, uint8_t b)
{
	return (a << b);
}

__regparm static uint64_t
_aullshl(uint64_t a, uint8_t b)
{
	return (a << b);
}

__regparm static int64_t
_allshr(int64_t a, uint8_t b)
{
	return (a >> b);
}

__regparm static uint64_t
_aullshr(uint64_t a, uint8_t b)
{
	return (a >> b);
}

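/*
 * Background on the helpers above: on 32-bit x86, Microsoft's compiler
 * does not inline 64-bit multiply/divide/shift operations; it emits
 * calls to runtime helpers with exactly these names (_allmul, _alldiv,
 * _allshr, _aullrem, ...). Windows binary drivers therefore import
 * them from ntoskrnl.exe, so our fake ntoskrnl has to supply
 * equivalents.
 */
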
static slist_entry *
ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
{
	slist_entry		*oldhead;

	oldhead = head->slh_list.slh_next;
	entry->sl_next = head->slh_list.slh_next;
	head->slh_list.slh_next = entry;
	head->slh_list.slh_depth++;
	head->slh_list.slh_seq++;

	return(oldhead);
}

static slist_entry *
ntoskrnl_popsl(slist_header *head)
{
	slist_entry		*first;

	first = head->slh_list.slh_next;
	if (first != NULL) {
		head->slh_list.slh_next = first->sl_next;
		head->slh_list.slh_depth--;
		head->slh_list.slh_seq++;
	}

	return(first);
}

/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() in the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
 */

static funcptr
ntoskrnl_findwrap(funcptr func)
{
	image_patch_table	*patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		if ((funcptr)patch->ipt_func == func)
			return((funcptr)patch->ipt_wrap);
		patch++;
	}

	return(NULL);
}

__stdcall static void
ExInitializePagedLookasideList(
	paged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc,
	lookaside_free_func *freefunc,
	uint32_t flags,
	size_t size,
	uint32_t tag,
	uint16_t depth)
{
	memset((char *)lookaside, 0, sizeof(paged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}

__stdcall static void
ExDeletePagedLookasideList(paged_lookaside_list *lookaside)
{
	void			*buf;
	__stdcall void		(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}

__stdcall static void
ExInitializeNPagedLookasideList(
	npaged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc,
	lookaside_free_func *freefunc,
	uint32_t flags,
	size_t size,
	uint32_t tag,
	uint16_t depth)
{
	memset((char *)lookaside, 0, sizeof(npaged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}

__stdcall static void
ExDeleteNPagedLookasideList(npaged_lookaside_list *lookaside)
{
	void			*buf;
	__stdcall void		(*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}

/*
 * Note: the interlocked slist push and pop routines are
 * declared to be _fastcall in Windows. gcc 3.4 is supposed
 * to have support for this calling convention, however we
 * don't have that version available yet, so we kludge things
 * up using __regparm__(3) and some argument shuffling.
 */

__fastcall static slist_entry *
InterlockedPushEntrySList(REGARGS2(slist_header *head, slist_entry *entry))
{
	slist_entry		*oldhead;

	oldhead = (slist_entry *)FASTCALL3(ExInterlockedPushEntrySList,
	    head, entry, &ntoskrnl_global);

	return(oldhead);
}

__fastcall static slist_entry *
InterlockedPopEntrySList(REGARGS1(slist_header *head))
{
	slist_entry		*first;

	first = (slist_entry *)FASTCALL2(ExInterlockedPopEntrySList,
	    head, &ntoskrnl_global);

	return(first);
}

__fastcall static slist_entry *
ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
    slist_entry *entry), kspin_lock *lock)
{
	slist_entry		*oldhead;
	uint8_t			irql;

	KeAcquireSpinLock(lock, &irql);
	oldhead = ntoskrnl_pushsl(head, entry);
	KeReleaseSpinLock(lock, irql);

	return(oldhead);
}

__fastcall static slist_entry *
ExInterlockedPopEntrySList(REGARGS2(slist_header *head, kspin_lock *lock))
{
	slist_entry		*first;
	uint8_t			irql;

	KeAcquireSpinLock(lock, &irql);
	first = ntoskrnl_popsl(head);
	KeReleaseSpinLock(lock, irql);

	return(first);
}

__stdcall static uint16_t
ExQueryDepthSList(slist_header *head)
{
	uint16_t		depth;
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	depth = head->slh_list.slh_depth;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(depth);
}

/* TODO: Make sure that LOCKDEBUG isn't defined, otherwise a "struct
 * TODO: simplelock" will be more than 4 bytes. I'm using a kspin_lock
 * TODO: as a simplelock, and the kspin_lock is 4 bytes, so this is OK
 * TODO: as long as LOCKDEBUG isn't defined.
 */

/*
 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
 * and KefReleaseSpinLockFromDpcLevel() appear to be analogous
 * to splnet()/splx() in their use. We can't create a new mutex
 * lock here because there is no complementary KeFreeSpinLock()
 * function. Instead, we grab a mutex from the mutex pool.
 */

__stdcall void
KeInitializeSpinLock(kspin_lock *lock)
{
#ifdef __FreeBSD__
	*lock = 0;
#else /* __NetBSD__ */
	simple_lock_init((struct simplelock *)lock);
#endif

	return;
}

#ifdef __i386__
__fastcall void
KefAcquireSpinLockAtDpcLevel(REGARGS1(kspin_lock *lock))
{
#ifdef __FreeBSD__
	while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
		/* sit and spin */;
#else /* __NetBSD__ */
	simple_lock((struct simplelock *)lock);
#endif

	return;
}

__fastcall void
KefReleaseSpinLockFromDpcLevel(REGARGS1(kspin_lock *lock))
{
#ifdef __FreeBSD__
	atomic_store_rel_int((volatile u_int *)lock, 0);
#else /* __NetBSD__ */
	simple_unlock((struct simplelock *)lock);
#endif
	return;
}

__stdcall uint8_t
KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
{
	uint8_t			oldirql;

	if (KeGetCurrentIrql() > DISPATCH_LEVEL)
		panic("IRQL_NOT_LESS_THAN_OR_EQUAL");

	oldirql = KeRaiseIrql(DISPATCH_LEVEL);
	KeAcquireSpinLockAtDpcLevel(lock);

	return(oldirql);
}
#else
__stdcall void
KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
{
	while (atomic_swap_uint((volatile u_int *)lock, 1) == 1)
		/* sit and spin */;

	return;
}

__stdcall void
KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
{
	*(volatile u_int *)lock = 0;

	return;
}
#endif /* __i386__ */

__fastcall uintptr_t
InterlockedExchange(REGARGS2(volatile uint32_t *dst, uintptr_t val))
{
	uint8_t			irql;
	uintptr_t		r;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	r = *dst;
	*dst = val;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(r);
}

__fastcall static uint32_t
InterlockedIncrement(REGARGS1(volatile uint32_t *addend))
{
	/*
	 * Return the updated value atomically; a separate re-read
	 * of *addend after the increment would race with other CPUs.
	 */
	return(atomic_inc_32_nv(addend));
}

__fastcall static uint32_t
InterlockedDecrement(REGARGS1(volatile uint32_t *addend))
{
	return(atomic_dec_32_nv(addend));
}

__fastcall static void
ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t inc))
{
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	*addend += inc;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return;
}

__stdcall mdl *
IoAllocateMdl(
	void *vaddr,
	uint32_t len,
	uint8_t secondarybuf,
	uint8_t chargequota,
	irp *iopkt)
{
	mdl			*m;
	int			zone = 0;

	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
		m = ExAllocatePoolWithTag(NonPagedPool,
		    MmSizeOfMdl(vaddr, len), 0);
	else {
#ifdef __FreeBSD__
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
#else
		m = pool_get(&mdl_pool, PR_WAITOK);
#endif
		zone++;
	}

	if (m == NULL)
		return (NULL);

	MmInitializeMdl(m, vaddr, len);

	/*
	 * MmInitializeMdl() clears the flags field, so we
	 * have to set this here. If the MDL came from the
	 * MDL UMA zone, tag it so we can release it to
	 * the right place later.
	 */
	if (zone)
		m->mdl_flags = MDL_ZONE_ALLOCED;

	if (iopkt != NULL) {
		if (secondarybuf == TRUE) {
			mdl			*last;
			last = iopkt->irp_mdl;
			while (last->mdl_next != NULL)
				last = last->mdl_next;
			last->mdl_next = m;
		} else {
			if (iopkt->irp_mdl != NULL)
				panic("leaking an MDL in IoAllocateMdl()");
			iopkt->irp_mdl = m;
		}
	}

	return (m);
}

__stdcall void
IoFreeMdl(mdl *m)
{
	if (m == NULL)
		return;

	if (m->mdl_flags & MDL_ZONE_ALLOCED)
#ifdef __FreeBSD__
		uma_zfree(mdl_zone, m);
#else
		pool_put(&mdl_pool, m);
#endif
	else
		ExFreePool(m);

	return;
}

__stdcall static uint32_t
MmSizeOfMdl(void *vaddr, size_t len)
{
	uint32_t		l;

	l = sizeof(struct mdl) +
	    (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));

	return(l);
}

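/*
 * Worked example (hypothetical numbers, 4KB pages): a 6000-byte
 * buffer starting 512 bytes into a page touches 2 pages, since
 * bytes 512..6511 straddle one page boundary; MmSizeOfMdl() then
 * returns sizeof(struct mdl) plus space for two page-array slots.
 */
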
/*
 * The Microsoft documentation says this routine fills in the
 * page array of an MDL with the _physical_ page addresses that
 * comprise the buffer, but we don't really want to do that here.
 * Instead, we just fill in the page array with the kernel virtual
 * addresses of the buffers.
 */
__stdcall static void
MmBuildMdlForNonPagedPool(mdl *m)
{
	vm_offset_t		*mdl_pages;
	int			pagecnt, i;

	pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);

	if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
		panic("not enough pages in MDL to describe buffer");

	mdl_pages = MmGetMdlPfnArray(m);

	/* Fill in each array slot, not just the first one. */
	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);

	m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
	m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);

	return;
}

__stdcall static void *
MmMapLockedPages(
	mdl *buf,
	uint8_t accessmode)
{
	buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
	return(MmGetMdlVirtualAddress(buf));
}

__stdcall static void *
MmMapLockedPagesSpecifyCache(
	mdl *buf,
	uint8_t accessmode,
	uint32_t cachetype,
	void *vaddr,
	uint32_t bugcheck,
	uint32_t prio)
{
	return(MmMapLockedPages(buf, accessmode));
}

__stdcall static void
MmUnmapLockedPages(
	void *vaddr,
	mdl *buf)
{
	buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
	return;
}

__stdcall static size_t
RtlCompareMemory(const void *s1, const void *s2, size_t len)
{
	size_t			i, total = 0;
	const uint8_t		*m1, *m2;

	m1 = s1;
	m2 = s2;

	for (i = 0; i < len; i++) {
		if (m1[i] == m2[i])
			total++;
	}
	return(total);
}

__stdcall static void
RtlInitAnsiString(ndis_ansi_string *dst, char *src)
{
	ndis_ansi_string	*a;

	a = dst;
	if (a == NULL)
		return;
	if (src == NULL) {
		a->nas_len = a->nas_maxlen = 0;
		a->nas_buf = NULL;
	} else {
		a->nas_buf = src;
		a->nas_len = a->nas_maxlen = strlen(src);
	}

	return;
}

__stdcall static void
RtlInitUnicodeString(ndis_unicode_string *dst, uint16_t *src)
{
	ndis_unicode_string	*u;
	int			i;

	u = dst;
	if (u == NULL)
		return;
	if (src == NULL) {
		u->us_len = u->us_maxlen = 0;
		u->us_buf = NULL;
	} else {
		i = 0;
		while(src[i] != 0)
			i++;
		u->us_buf = src;
		u->us_len = u->us_maxlen = i * 2;
	}

	return;
}

2167 __stdcall ndis_status
2168 RtlUnicodeStringToInteger(ndis_unicode_string *ustr, uint32_t base, uint32_t *val)
2170 uint16_t *uchr;
2171 int len, neg = 0;
2172 char abuf[64];
2173 char *astr;
2175 uchr = ustr->us_buf;
2176 len = ustr->us_len;
2177 memset(abuf, 0, sizeof(abuf));
2179 if ((char)((*uchr) & 0xFF) == '-') {
2180 neg = 1;
2181 uchr++;
2182 len -= 2;
2183 } else if ((char)((*uchr) & 0xFF) == '+') {
2184 neg = 0;
2185 uchr++;
2186 len -= 2;
2189 if (base == 0) {
2190 if ((char)((*uchr) & 0xFF) == 'b') {
2191 base = 2;
2192 uchr++;
2193 len -= 2;
2194 } else if ((char)((*uchr) & 0xFF) == 'o') {
2195 base = 8;
2196 uchr++;
2197 len -= 2;
2198 } else if ((char)((*uchr) & 0xFF) == 'x') {
2199 base = 16;
2200 uchr++;
2201 len -= 2;
2202 } else
2203 base = 10;
2206 astr = abuf;
2207 if (neg) {
2208 strcpy(astr, "-");
2209 astr++;
2212 ndis_unicode_to_ascii(uchr, len, &astr);
2213 *val = strtoul(abuf, NULL, base);
	return(NDIS_STATUS_SUCCESS);
}
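/*
 * Example (illustrative): for a counted UTF-16 string containing
 * "42" and base 10, *val becomes 42. With base 0, the base prefix
 * is a bare 'b', 'o' or 'x' after any sign (e.g. "x1A" yields 26),
 * not a C-style "0x".
 */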
2218 __stdcall static void
2219 RtlFreeUnicodeString(ndis_unicode_string *ustr)
2221 if (ustr->us_buf == NULL)
2222 return;
2223 free(ustr->us_buf, M_DEVBUF);
2224 ustr->us_buf = NULL;
2225 return;
2228 __stdcall static void
2229 RtlFreeAnsiString(ndis_ansi_string *astr)
2231 if (astr->nas_buf == NULL)
2232 return;
2233 free(astr->nas_buf, M_DEVBUF);
2234 astr->nas_buf = NULL;
2235 return;
2238 static int
2239 atoi(const char *str)
2241 #ifdef __FreeBSD__
2242 return (int)strtol(str, (char **)NULL, 10);
2243 #else
2244 int n;
2246 for (n = 0; *str && *str >= '0' && *str <= '9'; str++)
2247 n = n * 10 + *str - '0';
2248 return n;
2249 #endif
2253 static long
2254 atol(const char *str)
2256 #ifdef __FreeBSD__
2257 return strtol(str, (char **)NULL, 10);
2258 #else
2259 long n;
2261 for (n = 0; *str && *str >= '0' && *str <= '9'; str++)
2262 n = n * 10 + *str - '0';
2263 return n;
2264 #endif
/*
 * stolen from ./netipsec/key.c
 */
2273 #ifdef __NetBSD__
2274 void srandom(int);
2275 void srandom(int arg) {return;}
2276 #endif
2279 static int
2280 rand(void)
2282 struct timeval tv;
2284 microtime(&tv);
2285 srandom(tv.tv_usec);
2286 return((int)random());
2289 static void
2290 srand(unsigned int seed)
2292 srandom(seed);
2293 return;
2296 __stdcall static uint8_t
2297 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
2299 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
2300 return(TRUE);
	return(FALSE);
}
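/*
 * Example: IoIsWdmVersionAvailable(WDM_MAJOR, WDM_MINOR_WINXP)
 * returns TRUE; every other major/minor pair returns FALSE, so we
 * only ever advertise the Windows XP WDM version.
 */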
2304 __stdcall static ndis_status
2305 IoGetDeviceProperty(
2306 device_object *devobj,
2307 uint32_t regprop,
2308 uint32_t buflen,
2309 void *prop,
2310 uint32_t *reslen)
2312 driver_object *drv;
2313 uint16_t **name;
2315 drv = devobj->do_drvobj;
2317 switch (regprop) {
2318 case DEVPROP_DRIVER_KEYNAME:
2319 name = prop;
2320 *name = drv->dro_drivername.us_buf;
2321 *reslen = drv->dro_drivername.us_len;
2322 break;
2323 default:
2324 return(STATUS_INVALID_PARAMETER_2);
2325 break;
	return(STATUS_SUCCESS);
}
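/*
 * Illustrative sketch (hypothetical caller): the only property we
 * support is DEVPROP_DRIVER_KEYNAME, and unlike real Windows, which
 * copies the name into the supplied buffer, we hand back a pointer
 * to the driver's Unicode name:
 *
 *	uint16_t *name;
 *	uint32_t len;
 *
 *	IoGetDeviceProperty(devobj, DEVPROP_DRIVER_KEYNAME,
 *	    sizeof(name), &name, &len);
 */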
2331 __stdcall static void
2332 KeInitializeMutex(
2333 kmutant *kmutex,
2334 uint32_t level)
2336 INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
2337 kmutex->km_abandoned = FALSE;
2338 kmutex->km_apcdisable = 1;
2339 kmutex->km_header.dh_sigstate = TRUE;
2340 kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
2341 kmutex->km_header.dh_size = OTYPE_MUTEX;
2342 kmutex->km_acquirecnt = 0;
2343 kmutex->km_ownerthread = NULL;
2344 return;
2347 __stdcall static uint32_t
2348 KeReleaseMutex(
2349 kmutant *kmutex,
2350 uint8_t kwait)
2352 #ifdef __NetBSD__
2353 int s;
2354 #endif
2356 #ifdef __NetBSD__
2357 DISPATCH_LOCK();
2358 #else
2359 mtx_lock(&ntoskrnl_dispatchlock);
2360 #endif
2362 #ifdef __FreeBSD__
2363 if (kmutex->km_ownerthread != curthread->td_proc) {
2364 #else
2365 if (kmutex->km_ownerthread != curproc) {
2366 #endif
2367 #ifdef __FreeBSD__
2368 mtx_unlock(&ntoskrnl_dispatchlock);
2369 #else /* __NetBSD__ */
2370 DISPATCH_UNLOCK();
2371 #endif
2372 return(STATUS_MUTANT_NOT_OWNED);
2374 kmutex->km_acquirecnt--;
2375 if (kmutex->km_acquirecnt == 0) {
2376 kmutex->km_ownerthread = NULL;
2377 ntoskrnl_wakeup(&kmutex->km_header);
2380 #ifdef __FreeBSD__
2381 mtx_unlock(&ntoskrnl_dispatchlock);
2382 #else /* __NetBSD__ */
2383 DISPATCH_UNLOCK();
2384 #endif
	return(kmutex->km_acquirecnt);
}
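/*
 * Illustrative sketch of the usual acquire/release pattern
 * (hypothetical driver code):
 *
 *	kmutant m;
 *
 *	KeInitializeMutex(&m, 0);
 *	KeWaitForSingleObject(&m, 0, 0, FALSE, NULL);	... acquire ...
 *	... critical section ...
 *	KeReleaseMutex(&m, FALSE);			... release ...
 */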
2389 __stdcall static uint32_t
2390 KeReadStateMutex(kmutant *kmutex)
2392 return(kmutex->km_header.dh_sigstate);
2395 __stdcall void
2396 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
2398 INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
2399 kevent->k_header.dh_sigstate = state;
2400 kevent->k_header.dh_type = type;
2401 kevent->k_header.dh_size = OTYPE_EVENT;
2402 return;
2405 __stdcall uint32_t
2406 KeResetEvent(nt_kevent *kevent)
2408 uint32_t prevstate;
2409 #ifdef __NetBSD__
2410 int s;
2411 #endif
2413 #ifdef __NetBSD__
2414 DISPATCH_LOCK();
2415 #else
2416 mtx_lock(&ntoskrnl_dispatchlock);
2417 #endif
2419 prevstate = kevent->k_header.dh_sigstate;
2420 kevent->k_header.dh_sigstate = FALSE;
2422 #ifdef __FreeBSD__
2423 mtx_unlock(&ntoskrnl_dispatchlock);
2424 #else /* __NetBSD__ */
2425 DISPATCH_UNLOCK();
2426 #endif
2428 return(prevstate);
2431 __stdcall uint32_t
2432 KeSetEvent(
2433 nt_kevent *kevent,
2434 uint32_t increment,
2435 uint8_t kwait)
2437 uint32_t prevstate;
2438 #ifdef __NetBSD__
2439 int s;
2440 #endif
2442 #ifdef __NetBSD__
2443 DISPATCH_LOCK();
2444 #else
2445 mtx_lock(&ntoskrnl_dispatchlock);
2446 #endif
2448 prevstate = kevent->k_header.dh_sigstate;
2449 ntoskrnl_wakeup(&kevent->k_header);
2451 #ifdef __FreeBSD__
2452 mtx_unlock(&ntoskrnl_dispatchlock);
2453 #else /* __NetBSD__ */
2454 DISPATCH_UNLOCK();
2455 #endif
2457 return(prevstate);
2460 __stdcall void
2461 KeClearEvent(nt_kevent *kevent)
2463 kevent->k_header.dh_sigstate = FALSE;
2464 return;
2467 __stdcall uint32_t
2468 KeReadStateEvent(nt_kevent *kevent)
	return(kevent->k_header.dh_sigstate);
}
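/*
 * Illustrative sketch: a notification event (hypothetical code).
 * One thread blocks in KeWaitForSingleObject() until another
 * calls KeSetEvent():
 *
 *	nt_kevent ev;
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	...
 *	KeWaitForSingleObject(&ev, 0, 0, FALSE, NULL);
 *
 * and elsewhere:
 *
 *	KeSetEvent(&ev, 0, FALSE);
 */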
2473 __stdcall static ndis_status
2474 ObReferenceObjectByHandle(
2475 ndis_handle handle,
2476 uint32_t reqaccess,
2477 void *otype,
2478 uint8_t accessmode,
2479 void **object,
2480 void **handleinfo)
2482 nt_objref *nr;
2484 nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
2485 if (nr == NULL)
2486 return(NDIS_STATUS_FAILURE);
2488 INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
2489 nr->no_obj = handle;
2490 nr->no_dh.dh_size = OTYPE_THREAD;
2491 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
2492 *object = nr;
2494 return(NDIS_STATUS_SUCCESS);
2497 __fastcall static void
2498 ObfDereferenceObject(REGARGS1(void *object))
2500 nt_objref *nr;
2502 nr = object;
2503 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
2504 free(nr, M_DEVBUF);
2506 return;
2509 __stdcall static uint32_t
2510 ZwClose(ndis_handle handle)
2512 return(STATUS_SUCCESS);
/*
 * This is here just in case the thread returns without calling
 * PsTerminateSystemThread().
 */
2519 static void
2520 ntoskrnl_thrfunc(void *arg)
2522 thread_context *thrctx;
2523 __stdcall uint32_t (*tfunc)(void *);
2524 void *tctx;
2525 uint32_t rval;
2527 thrctx = arg;
2528 tfunc = thrctx->tc_thrfunc;
2529 tctx = thrctx->tc_thrctx;
2530 free(thrctx, M_TEMP);
2532 rval = MSCALL1(tfunc, tctx);
2534 PsTerminateSystemThread(rval);
2535 return; /* notreached */
2538 __stdcall static ndis_status
2539 PsCreateSystemThread(
2540 ndis_handle *handle,
2541 uint32_t reqaccess,
2542 void *objattrs,
2543 ndis_handle phandle,
2544 void *clientid,
2545 void *thrfunc,
2546 void *thrctx)
2548 int error;
2549 char tname[128];
2550 thread_context *tc;
2551 struct proc *p;
2553 tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
2554 if (tc == NULL)
2555 return(NDIS_STATUS_FAILURE);
2557 tc->tc_thrctx = thrctx;
2558 tc->tc_thrfunc = thrfunc;
2560 sprintf(tname, "windows kthread %d", ntoskrnl_kth);
2561 #ifdef __FreeBSD__
2562 error = kthread_create(ntoskrnl_thrfunc, tc, &p,
2563 RFHIGHPID, NDIS_KSTACK_PAGES, tname);
2564 #else
2565 /* TODO: Provide a larger stack for these threads (NDIS_KSTACK_PAGES) */
2566 error = ndis_kthread_create(ntoskrnl_thrfunc, tc, &p, NULL, 0, tname);
2567 #endif
2568 *handle = p;
2570 ntoskrnl_kth++;
2572 return(error);
/*
 * In Windows, the exit of a thread is an event that you're allowed
 * to wait on, assuming you've obtained a reference to the thread using
 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 * simulate this behavior is to register each thread we create in a
 * reference list, and if someone holds a reference to us, we poke
 * them.
 */
2583 __stdcall static ndis_status
2584 PsTerminateSystemThread(ndis_status status)
2586 struct nt_objref *nr;
2587 #ifdef __NetBSD__
2588 int s;
2589 #endif
2591 #ifdef __NetBSD__
2592 DISPATCH_LOCK();
2593 #else
2594 mtx_lock(&ntoskrnl_dispatchlock);
2595 #endif
2597 TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
2598 #ifdef __FreeBSD__
2599 if (nr->no_obj != curthread->td_proc)
2600 #else
2601 if (nr->no_obj != curproc)
2602 #endif
2603 continue;
2604 ntoskrnl_wakeup(&nr->no_dh);
2605 break;
2608 #ifdef __FreeBSD__
2609 mtx_unlock(&ntoskrnl_dispatchlock);
2610 #else /* __NetBSD__ */
2611 DISPATCH_UNLOCK();
2612 #endif
2614 ntoskrnl_kth--;
2616 #ifdef __FreeBSD__
2617 #if __FreeBSD_version < 502113
2618 mtx_lock(&Giant);
2619 #endif
2620 #endif /* __FreeBSD__ */
2621 kthread_exit(0);
	return(0); /* notreached */
}
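/*
 * Illustrative sketch of the Windows-side idiom this emulates
 * (hypothetical driver code; "func", "ctx" and THREAD_ALL_ACCESS
 * are assumed names):
 *
 *	ndis_handle h;
 *	void *thr;
 *
 *	PsCreateSystemThread(&h, THREAD_ALL_ACCESS, NULL, NULL,
 *	    NULL, func, ctx);
 *	ObReferenceObjectByHandle(h, THREAD_ALL_ACCESS, NULL,
 *	    0, &thr, NULL);
 *	KeWaitForSingleObject(thr, 0, 0, FALSE, NULL);
 *	ObfDereferenceObject(thr);
 *	ZwClose(h);
 */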
static uint32_t
DbgPrint(char *fmt, ...)
{
	//va_list ap;

	if (bootverbose) {
		//va_start(ap, fmt);
		//vprintf(fmt, ap);
	}

	return(STATUS_SUCCESS);
}
2638 __stdcall static void
2639 DbgBreakPoint(void)
2641 #if defined(__FreeBSD__) && __FreeBSD_version < 502113
2642 Debugger("DbgBreakPoint(): breakpoint");
2643 #elif defined(__FreeBSD__) && __FreeBSD_version >= 502113
2644 kdb_enter("DbgBreakPoint(): breakpoint");
#else /* NetBSD case */
	; /* TODO: figure out how to enter the debugger without panicking */
#endif
}
2650 static void
2651 ntoskrnl_timercall(void *arg)
2653 ktimer *timer;
2654 struct timeval tv;
2655 #ifdef __NetBSD__
2656 int s;
2657 #endif
2659 #ifdef __FreeBSD__
2660 mtx_unlock(&Giant);
2661 #endif
2663 #ifdef __NetBSD__
2664 DISPATCH_LOCK();
2665 #else
2666 mtx_lock(&ntoskrnl_dispatchlock);
2667 #endif
2669 timer = arg;
2671 timer->k_header.dh_inserted = FALSE;
	/*
	 * If this is a periodic timer, re-arm it so it will fire
	 * again. We do this before running any deferred procedure
	 * calls because it's possible the DPC might cancel the
	 * timer, in which case it would be wrong for us to re-arm
	 * it afterwards.
	 */
2682 if (timer->k_period) {
2683 tv.tv_sec = 0;
2684 tv.tv_usec = timer->k_period * 1000;
2685 timer->k_header.dh_inserted = TRUE;
2686 #ifdef __FreeBSD__
2687 timer->k_handle = timeout(ntoskrnl_timercall,
2688 timer, tvtohz(&tv));
2689 #else /* __NetBSD__ */
2690 callout_reset(timer->k_handle, tvtohz(&tv), ntoskrnl_timercall, timer);
2691 #endif /* __NetBSD__ */
2694 if (timer->k_dpc != NULL)
2695 KeInsertQueueDpc(timer->k_dpc, NULL, NULL);
2697 ntoskrnl_wakeup(&timer->k_header);
2699 #ifdef __FreeBSD__
2700 mtx_unlock(&ntoskrnl_dispatchlock);
2701 #else /* __NetBSD__ */
2702 DISPATCH_UNLOCK();
2703 #endif
2705 #ifdef __FreeBSD__
2706 mtx_lock(&Giant);
2707 #endif
2709 return;
2712 __stdcall void
2713 KeInitializeTimer(ktimer *timer)
2715 if (timer == NULL)
2716 return;
2718 KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
2720 return;
2723 __stdcall void
2724 KeInitializeTimerEx(ktimer *timer, uint32_t type)
2726 if (timer == NULL)
2727 return;
2729 INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
2730 timer->k_header.dh_sigstate = FALSE;
2731 timer->k_header.dh_inserted = FALSE;
2732 timer->k_header.dh_type = type;
2733 timer->k_header.dh_size = OTYPE_TIMER;
2734 #ifdef __FreeBSD__
2735 callout_handle_init(&timer->k_handle);
2736 #else
2737 callout_init(timer->k_handle, 0);
2738 #endif
2740 return;
/*
 * This is a wrapper for Windows deferred procedure calls that
 * have been placed on an NDIS thread work queue. We need it
 * since the DPC could be a _stdcall function. Also, as far as
 * I can tell, deferred procedure calls must run at DISPATCH_LEVEL.
 */
2749 static void
2750 ntoskrnl_run_dpc(void *arg)
2752 __stdcall kdpc_func dpcfunc;
2753 kdpc *dpc;
2754 uint8_t irql;
2756 dpc = arg;
2757 dpcfunc = dpc->k_deferedfunc;
2758 irql = KeRaiseIrql(DISPATCH_LEVEL);
2759 MSCALL4(dpcfunc, dpc, dpc->k_deferredctx,
2760 dpc->k_sysarg1, dpc->k_sysarg2);
2761 KeLowerIrql(irql);
2763 return;
2766 __stdcall void
2767 KeInitializeDpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
2770 if (dpc == NULL)
2771 return;
2773 dpc->k_deferedfunc = dpcfunc;
2774 dpc->k_deferredctx = dpcctx;
2776 return;
2779 __stdcall uint8_t
2780 KeInsertQueueDpc(kdpc *dpc, void *sysarg1, void *sysarg2)
2782 dpc->k_sysarg1 = sysarg1;
2783 dpc->k_sysarg2 = sysarg2;
2785 if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
2786 return(FALSE);
	return(TRUE);
}
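/*
 * Illustrative sketch (hypothetical driver code; "my_dpcfunc" and
 * "my_ctx" are assumed names): queueing a DPC.
 *
 *	static kdpc my_dpc;
 *
 *	KeInitializeDpc(&my_dpc, my_dpcfunc, my_ctx);
 *	KeInsertQueueDpc(&my_dpc, NULL, NULL);
 *
 * my_dpcfunc(&my_dpc, my_ctx, NULL, NULL) then runs from the NDIS
 * work queue at DISPATCH_LEVEL via ntoskrnl_run_dpc() above.
 */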
2791 __stdcall uint8_t
2792 KeRemoveQueueDpc(kdpc *dpc)
2794 if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
2795 return(FALSE);
2797 return(TRUE);
2800 __stdcall uint8_t
2801 KeSetTimerEx(ktimer *timer, int64_t duetime, uint32_t period, kdpc *dpc)
2803 struct timeval tv;
2804 uint64_t curtime;
2805 uint8_t pending;
2806 #ifdef __NetBSD__
2807 int s;
2808 #endif
2810 if (timer == NULL)
2811 return(FALSE);
2813 #ifdef __NetBSD__
2814 DISPATCH_LOCK();
2815 #else
2816 mtx_lock(&ntoskrnl_dispatchlock);
2817 #endif
2819 if (timer->k_header.dh_inserted == TRUE) {
2820 #ifdef __FreeBSD__
2821 untimeout(ntoskrnl_timercall, timer, timer->k_handle);
2822 #else /* __NetBSD__ */
2823 callout_stop(timer->k_handle);
2824 #endif
2825 timer->k_header.dh_inserted = FALSE;
2826 pending = TRUE;
2827 } else
2828 pending = FALSE;
2830 timer->k_duetime = duetime;
2831 timer->k_period = period;
2832 timer->k_header.dh_sigstate = FALSE;
2833 timer->k_dpc = dpc;
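	/*
	 * Windows expresses duetime in 100-nanosecond units: a
	 * negative value is an offset relative to now, while a
	 * non-negative value is an absolute system time, which we
	 * convert below into an offset from the current time.
	 */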
	if (duetime < 0) {
		tv.tv_sec = - (duetime) / 10000000;
		tv.tv_usec = (- (duetime) / 10) -
		    (tv.tv_sec * 1000000);
	} else {
		ntoskrnl_time(&curtime);
		if (duetime < curtime)
			tv.tv_sec = tv.tv_usec = 0;
		else {
			tv.tv_sec = ((duetime) - curtime) / 10000000;
			tv.tv_usec = ((duetime) - curtime) / 10 -
			    (tv.tv_sec * 1000000);
		}
	}

	timer->k_header.dh_inserted = TRUE;
2851 #ifdef __FreeBSD__
2852 timer->k_handle = timeout(ntoskrnl_timercall, timer, tvtohz(&tv));
2853 #else
2854 callout_reset(timer->k_handle, tvtohz(&tv), ntoskrnl_timercall, timer);
2855 #endif
2857 #ifdef __FreeBSD__
2858 mtx_unlock(&ntoskrnl_dispatchlock);
2859 #else /* __NetBSD__ */
2860 DISPATCH_UNLOCK();
2861 #endif
	return(pending);
}
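/*
 * Illustrative sketch: arming a periodic timer (hypothetical code;
 * "tmr" and "my_dpc" are assumed to be initialized elsewhere).
 * A duetime of -5000000 means "0.5 seconds from now" in 100ns
 * units; the period is in milliseconds, so the DPC then fires
 * every 500ms until KeCancelTimer() is called:
 *
 *	KeSetTimerEx(&tmr, -5000000, 500, &my_dpc);
 *	...
 *	KeCancelTimer(&tmr);
 */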
2866 __stdcall uint8_t
2867 KeSetTimer(ktimer *timer, int64_t duetime, kdpc *dpc)
2869 return (KeSetTimerEx(timer, duetime, 0, dpc));
2872 __stdcall uint8_t
2873 KeCancelTimer(ktimer *timer)
2875 uint8_t pending;
2876 #ifdef __NetBSD__
2877 int s;
2878 #endif
2880 if (timer == NULL)
2881 return(FALSE);
2883 #ifdef __NetBSD__
2884 DISPATCH_LOCK();
2885 #else
2886 mtx_lock(&ntoskrnl_dispatchlock);
2887 #endif
2889 if (timer->k_header.dh_inserted == TRUE) {
2890 #ifdef __FreeBSD__
2891 untimeout(ntoskrnl_timercall, timer, timer->k_handle);
2892 #else /* __NetBSD__ */
2893 callout_stop(timer->k_handle);
2894 #endif
2895 pending = TRUE;
2896 } else
2897 pending = KeRemoveQueueDpc(timer->k_dpc);
2899 #ifdef __FreeBSD__
2900 mtx_unlock(&ntoskrnl_dispatchlock);
2901 #else /* __NetBSD__ */
2902 DISPATCH_UNLOCK();
2903 #endif
2905 return(pending);
2908 __stdcall uint8_t
2909 KeReadStateTimer(ktimer *timer)
2911 return(timer->k_header.dh_sigstate);
2914 __stdcall static void
2915 dummy(void)
2917 printf ("ntoskrnl dummy called...\n");
2918 return;
2922 image_patch_table ntoskrnl_functbl[] = {
2923 IMPORT_FUNC(RtlCompareMemory),
2924 IMPORT_FUNC(RtlEqualUnicodeString),
2925 IMPORT_FUNC(RtlCopyUnicodeString),
2926 IMPORT_FUNC(RtlUnicodeStringToAnsiString),
2927 IMPORT_FUNC(RtlAnsiStringToUnicodeString),
2928 IMPORT_FUNC(RtlInitAnsiString),
2929 IMPORT_FUNC_MAP(RtlInitString, RtlInitAnsiString),
2930 IMPORT_FUNC(RtlInitUnicodeString),
2931 IMPORT_FUNC(RtlFreeAnsiString),
2932 IMPORT_FUNC(RtlFreeUnicodeString),
2933 IMPORT_FUNC(RtlUnicodeStringToInteger),
2934 IMPORT_FUNC(sprintf),
2935 IMPORT_FUNC(vsprintf),
2936 IMPORT_FUNC_MAP(_snprintf, snprintf),
2937 IMPORT_FUNC_MAP(_vsnprintf, vsnprintf),
2938 IMPORT_FUNC(DbgPrint),
2939 IMPORT_FUNC(DbgBreakPoint),
2940 IMPORT_FUNC(strncmp),
2941 IMPORT_FUNC(strcmp),
2942 IMPORT_FUNC(strncpy),
2943 IMPORT_FUNC(strcpy),
2944 IMPORT_FUNC(strlen),
2945 IMPORT_FUNC(memcpy),
2946 IMPORT_FUNC_MAP(memmove, ntoskrnl_memset),
2947 IMPORT_FUNC_MAP(memset, ntoskrnl_memset),
2948 IMPORT_FUNC(IoAllocateDriverObjectExtension),
2949 IMPORT_FUNC(IoGetDriverObjectExtension),
2950 IMPORT_FUNC(IofCallDriver),
2951 IMPORT_FUNC(IofCompleteRequest),
2952 IMPORT_FUNC(IoAcquireCancelSpinLock),
2953 IMPORT_FUNC(IoReleaseCancelSpinLock),
2954 IMPORT_FUNC(IoCancelIrp),
2955 IMPORT_FUNC(IoCreateDevice),
2956 IMPORT_FUNC(IoDeleteDevice),
2957 IMPORT_FUNC(IoGetAttachedDevice),
2958 IMPORT_FUNC(IoAttachDeviceToDeviceStack),
2959 IMPORT_FUNC(IoDetachDevice),
2960 IMPORT_FUNC(IoBuildSynchronousFsdRequest),
2961 IMPORT_FUNC(IoBuildAsynchronousFsdRequest),
2962 IMPORT_FUNC(IoBuildDeviceIoControlRequest),
2963 IMPORT_FUNC(IoAllocateIrp),
2964 IMPORT_FUNC(IoReuseIrp),
2965 IMPORT_FUNC(IoMakeAssociatedIrp),
2966 IMPORT_FUNC(IoFreeIrp),
2967 IMPORT_FUNC(IoInitializeIrp),
2968 IMPORT_FUNC(KeWaitForSingleObject),
2969 IMPORT_FUNC(KeWaitForMultipleObjects),
2970 IMPORT_FUNC(_allmul),
2971 IMPORT_FUNC(_alldiv),
2972 IMPORT_FUNC(_allrem),
2973 IMPORT_FUNC(_allshr),
2974 IMPORT_FUNC(_allshl),
2975 IMPORT_FUNC(_aullmul),
2976 IMPORT_FUNC(_aulldiv),
2977 IMPORT_FUNC(_aullrem),
2978 IMPORT_FUNC(_aullshr),
2979 IMPORT_FUNC(_aullshl),
2980 IMPORT_FUNC(atoi),
2981 IMPORT_FUNC(atol),
2982 IMPORT_FUNC(rand),
2983 IMPORT_FUNC(srand),
2984 IMPORT_FUNC(WRITE_REGISTER_USHORT),
2985 IMPORT_FUNC(READ_REGISTER_USHORT),
2986 IMPORT_FUNC(WRITE_REGISTER_ULONG),
2987 IMPORT_FUNC(READ_REGISTER_ULONG),
2988 IMPORT_FUNC(READ_REGISTER_UCHAR),
2989 IMPORT_FUNC(WRITE_REGISTER_UCHAR),
2990 IMPORT_FUNC(ExInitializePagedLookasideList),
2991 IMPORT_FUNC(ExDeletePagedLookasideList),
2992 IMPORT_FUNC(ExInitializeNPagedLookasideList),
2993 IMPORT_FUNC(ExDeleteNPagedLookasideList),
2994 IMPORT_FUNC(InterlockedPopEntrySList),
2995 IMPORT_FUNC(InterlockedPushEntrySList),
2996 IMPORT_FUNC(ExQueryDepthSList),
2997 IMPORT_FUNC_MAP(ExpInterlockedPopEntrySList, InterlockedPopEntrySList),
2998 IMPORT_FUNC_MAP(ExpInterlockedPushEntrySList,
2999 InterlockedPushEntrySList),
3000 IMPORT_FUNC(ExInterlockedPopEntrySList),
3001 IMPORT_FUNC(ExInterlockedPushEntrySList),
3002 IMPORT_FUNC(ExAllocatePoolWithTag),
3003 IMPORT_FUNC(ExFreePool),
3004 #ifdef __i386__
3005 IMPORT_FUNC(KefAcquireSpinLockAtDpcLevel),
3006 IMPORT_FUNC(KefReleaseSpinLockFromDpcLevel),
3007 IMPORT_FUNC(KeAcquireSpinLockRaiseToDpc),
3008 #else
	/*
	 * For AMD64, we can get away with just mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
	 * because the calling conventions end up being the same.
	 * On i386, we have to be careful because KfAcquireSpinLock()
	 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
	 */
3016 IMPORT_FUNC(KeAcquireSpinLockAtDpcLevel),
3017 IMPORT_FUNC(KeReleaseSpinLockFromDpcLevel),
3018 IMPORT_FUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock),
3019 #endif
3020 IMPORT_FUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock),
3021 IMPORT_FUNC(InterlockedIncrement),
3022 IMPORT_FUNC(InterlockedDecrement),
3023 IMPORT_FUNC(ExInterlockedAddLargeStatistic),
3024 IMPORT_FUNC(IoAllocateMdl),
3025 IMPORT_FUNC(IoFreeMdl),
3026 IMPORT_FUNC(MmSizeOfMdl),
3027 IMPORT_FUNC(MmMapLockedPages),
3028 IMPORT_FUNC(MmMapLockedPagesSpecifyCache),
3029 IMPORT_FUNC(MmUnmapLockedPages),
3030 IMPORT_FUNC(MmBuildMdlForNonPagedPool),
3031 IMPORT_FUNC(KeInitializeSpinLock),
3032 IMPORT_FUNC(IoIsWdmVersionAvailable),
3033 IMPORT_FUNC(IoGetDeviceProperty),
3034 IMPORT_FUNC(KeInitializeMutex),
3035 IMPORT_FUNC(KeReleaseMutex),
3036 IMPORT_FUNC(KeReadStateMutex),
3037 IMPORT_FUNC(KeInitializeEvent),
3038 IMPORT_FUNC(KeSetEvent),
3039 IMPORT_FUNC(KeResetEvent),
3040 IMPORT_FUNC(KeClearEvent),
3041 IMPORT_FUNC(KeReadStateEvent),
3042 IMPORT_FUNC(KeInitializeTimer),
3043 IMPORT_FUNC(KeInitializeTimerEx),
3044 IMPORT_FUNC(KeSetTimer),
3045 IMPORT_FUNC(KeSetTimerEx),
3046 IMPORT_FUNC(KeCancelTimer),
3047 IMPORT_FUNC(KeReadStateTimer),
3048 IMPORT_FUNC(KeInitializeDpc),
3049 IMPORT_FUNC(KeInsertQueueDpc),
3050 IMPORT_FUNC(KeRemoveQueueDpc),
3051 IMPORT_FUNC(ObReferenceObjectByHandle),
3052 IMPORT_FUNC(ObfDereferenceObject),
3053 IMPORT_FUNC(ZwClose),
3054 IMPORT_FUNC(PsCreateSystemThread),
3055 IMPORT_FUNC(PsTerminateSystemThread),
	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match
	 * in this table.
	 */
3064 { NULL, (FUNC)dummy, NULL },
3066 /* End of list. */
	{ NULL, NULL, NULL }
};