/*-
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.43.2.5 2005/03/31 04:24:36 wpaul Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_ntoskrnl.c,v 1.17 2009/03/18 16:00:17 cegger Exp $");
#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/atomic.h>
#include <machine/clock.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/stdarg.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pmap.h>
#include <sys/reboot.h> /* for AB_VERBOSE */
#include <vm/vm_param.h>
#include <compat/ndis/pe_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ndis_var.h>
#include <compat/ndis/nbcompat.h>
#define __regparm __attribute__((regparm(3)))
/* Turn on DbgPrint() output from Windows drivers */
#define boothowto AB_VERBOSE
__stdcall static uint8_t RtlEqualUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static void RtlCopyUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *);
__stdcall static ndis_status RtlUnicodeStringToAnsiString(ndis_ansi_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static ndis_status RtlAnsiStringToUnicodeString(ndis_unicode_string *,
	ndis_ansi_string *, uint8_t);
__stdcall static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
__stdcall static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
__stdcall static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
__stdcall static irp *IoAllocateIrp(uint8_t, uint8_t);
__stdcall static void IoReuseIrp(irp *, uint32_t);
__stdcall static void IoFreeIrp(irp *);
__stdcall static void IoInitializeIrp(irp *, uint16_t, uint8_t);
__stdcall static irp *IoMakeAssociatedIrp(irp *, uint8_t);
__stdcall static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_wakeup(void *);
static void ntoskrnl_timercall(void *);
static void ntoskrnl_run_dpc(void *);
__stdcall static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
__stdcall static uint16_t READ_REGISTER_USHORT(uint16_t *);
__stdcall static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
__stdcall static uint32_t READ_REGISTER_ULONG(uint32_t *);
__stdcall static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
__stdcall static uint8_t READ_REGISTER_UCHAR(uint8_t *);
__stdcall static int64_t _allmul(int64_t, int64_t);
__stdcall static int64_t _alldiv(int64_t, int64_t);
__stdcall static int64_t _allrem(int64_t, int64_t);
__regparm static int64_t _allshr(int64_t, uint8_t);
__regparm static int64_t _allshl(int64_t, uint8_t);
__stdcall static uint64_t _aullmul(uint64_t, uint64_t);
__stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
__stdcall static uint64_t _aullrem(uint64_t, uint64_t);
__regparm static uint64_t _aullshr(uint64_t, uint8_t);
__regparm static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
__stdcall static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeletePagedLookasideList(paged_lookaside_list *);
__stdcall static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
__fastcall static slist_entry
	*InterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry));
__fastcall static slist_entry
	*InterlockedPopEntrySList(REGARGS1(slist_header *head));
__fastcall static slist_entry
	*ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry), kspin_lock *lock);
__fastcall static slist_entry
	*ExInterlockedPopEntrySList(REGARGS2(slist_header *head,
	kspin_lock *lock));
__stdcall static uint16_t
	ExQueryDepthSList(slist_header *);
__fastcall static uint32_t
	InterlockedIncrement(REGARGS1(volatile uint32_t *addend));
__fastcall static uint32_t
	InterlockedDecrement(REGARGS1(volatile uint32_t *addend));
__fastcall static void
	ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t));
__stdcall static uint32_t MmSizeOfMdl(void *, size_t);
__stdcall static void MmBuildMdlForNonPagedPool(mdl *);
__stdcall static void *MmMapLockedPages(mdl *, uint8_t);
__stdcall static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
__stdcall static void MmUnmapLockedPages(void *, mdl *);
__stdcall static size_t RtlCompareMemory(const void *, const void *, size_t);
__stdcall static void RtlInitAnsiString(ndis_ansi_string *, char *);
__stdcall static void RtlInitUnicodeString(ndis_unicode_string *,
	uint16_t *);
__stdcall static void RtlFreeUnicodeString(ndis_unicode_string *);
__stdcall static void RtlFreeAnsiString(ndis_ansi_string *);
__stdcall static ndis_status RtlUnicodeStringToInteger(ndis_unicode_string *,
	uint32_t, uint32_t *);
static int atoi (const char *);
static long atol (const char *);
static int rand(void);
static void srand(unsigned int);
static void ntoskrnl_time(uint64_t *);
__stdcall static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
__stdcall static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
__stdcall static ndis_status PsTerminateSystemThread(ndis_status);
__stdcall static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
__stdcall static void KeInitializeMutex(kmutant *, uint32_t);
__stdcall static uint32_t KeReleaseMutex(kmutant *, uint8_t);
__stdcall static uint32_t KeReadStateMutex(kmutant *);
__stdcall static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
__fastcall static void ObfDereferenceObject(REGARGS1(void *object));
__stdcall static uint32_t ZwClose(ndis_handle);
static void *ntoskrnl_memset(void *, int, size_t);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
__stdcall static void DbgBreakPoint(void);
__stdcall static void dummy(void);
static struct mtx ntoskrnl_dispatchlock;
#else /* __NetBSD__ */
static struct simplelock ntoskrnl_dispatchlock;
#define DISPATCH_LOCK()		do {s = splnet(); simple_lock(&ntoskrnl_dispatchlock);} while(0)
#define DISPATCH_UNLOCK()	do {simple_unlock(&ntoskrnl_dispatchlock); splx(s);} while(0)

static kspin_lock ntoskrnl_global;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;

static uma_zone_t mdl_zone;

static struct pool mdl_pool;
int
ntoskrnl_libinit(void)
{
	image_patch_table	*patch;

	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF);
#else /* __NetBSD__ */
	simple_lock_init(&ntoskrnl_dispatchlock);

	KeInitializeSpinLock(&ntoskrnl_global);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	TAILQ_INIT(&ntoskrnl_reflist);

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap);
		patch++;
	}
	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
	 */
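	/*
	 * Illustrative sketch (not compiled in): the policy described
	 * above reduces to a size check against the zone element size,
	 * exactly as IoAllocateMdl() does further down in this file
	 * (whose variable names are borrowed here).
	 */
#if 0
	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
		m = ExAllocatePoolWithTag(NonPagedPool,	/* > 16 pages: heap */
		    MmSizeOfMdl(vaddr, len), 0);
	else
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO); /* zone element */
#endif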
	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	pool_init(&mdl_pool, MDL_ZONE_SIZE, 0, 0, 0, "winmdl", NULL,
int
ntoskrnl_libfini(void)
{
	image_patch_table	*patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

	uma_zdestroy(mdl_zone);
	mtx_destroy(&ntoskrnl_dispatchlock);

	pool_destroy(&mdl_pool);
	/* XXX destroy lock */
/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(void *buf, int ch, size_t size)
{
	return(memset(buf, ch, size));
}
__stdcall static uint8_t
RtlEqualUnicodeString(ndis_unicode_string *str1, ndis_unicode_string *str2,
    uint8_t caseinsensitive)
{
	int			i;

	if (str1->us_len != str2->us_len)
		return(FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return(FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return(FALSE);
		}
	}

	return(TRUE);
}
__stdcall static void
RtlCopyUnicodeString(ndis_unicode_string *dest, ndis_unicode_string *src)
{
	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
}
__stdcall static ndis_status
RtlUnicodeStringToAnsiString(ndis_ansi_string *dest, ndis_unicode_string *src,
    uint8_t allocate)
{
	char			*astr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_unicode_to_ascii(src->us_buf, src->us_len, &astr))
			return(NDIS_STATUS_FAILURE);
		dest->nas_buf = astr;
		dest->nas_len = dest->nas_maxlen = strlen(astr);
	} else {
		dest->nas_len = src->us_len / 2; /* XXX */
		if (dest->nas_maxlen < dest->nas_len)
			dest->nas_len = dest->nas_maxlen;
		ndis_unicode_to_ascii(src->us_buf, dest->nas_len * 2,
		    &dest->nas_buf);
	}

	return (NDIS_STATUS_SUCCESS);
}
__stdcall static ndis_status
RtlAnsiStringToUnicodeString(ndis_unicode_string *dest, ndis_ansi_string *src,
    uint8_t allocate)
{
	uint16_t		*ustr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
			return(NDIS_STATUS_FAILURE);
		dest->us_buf = ustr;
		dest->us_len = dest->us_maxlen = strlen(src->nas_buf) * 2;
	} else {
		dest->us_len = src->nas_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
		ndis_ascii_to_unicode(src->nas_buf, &dest->us_buf);
	}

	return (NDIS_STATUS_SUCCESS);
}
391 ExAllocatePoolWithTag(
398 buf
= malloc(len
, M_DEVBUF
, M_NOWAIT
);
405 ExFreePool(void *buf
)
412 IoAllocateDriverObjectExtension(driver_object
*drv
, void *clid
, uint32_t extlen
, void **ext
)
414 custom_extension
*ce
;
416 ce
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(custom_extension
)
420 return(STATUS_INSUFFICIENT_RESOURCES
);
423 INSERT_LIST_TAIL((&drv
->dro_driverext
->dre_usrext
), (&ce
->ce_list
));
425 *ext
= (void *)(ce
+ 1);
427 return(STATUS_SUCCESS
);
431 IoGetDriverObjectExtension(driver_object
*drv
, void *clid
)
434 custom_extension
*ce
;
436 printf("in IoGetDriverObjectExtension\n");
438 e
= drv
->dro_driverext
->dre_usrext
.nle_flink
;
439 while (e
!= &drv
->dro_driverext
->dre_usrext
) {
440 ce
= (custom_extension
*)e
;
441 if (ce
->ce_clid
== clid
)
443 return((void *)(ce
+ 1));
446 printf("not found\n");
455 unicode_string
*devname
,
459 device_object
**newdev
)
464 printf("In IoCreateDevice: drv = %x, devextlen = %x\n", drv
, devextlen
);
467 dev
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(device_object
), 0);
469 printf("dev = %x\n", dev
);
472 return(STATUS_INSUFFICIENT_RESOURCES
);
474 dev
->do_type
= devtype
;
475 dev
->do_drvobj
= drv
;
476 dev
->do_currirp
= NULL
;
480 dev
->do_devext
= ExAllocatePoolWithTag(NonPagedPool
,
483 if (dev
->do_devext
== NULL
) {
485 return(STATUS_INSUFFICIENT_RESOURCES
);
488 memset(dev
->do_devext
, 0, devextlen
);
490 dev
->do_devext
= NULL
;
492 dev
->do_size
= sizeof(device_object
) + devextlen
;
494 dev
->do_attacheddev
= NULL
;
495 dev
->do_nextdev
= NULL
;
496 dev
->do_devtype
= devtype
;
497 dev
->do_stacksize
= 1;
498 dev
->do_alignreq
= 1;
499 dev
->do_characteristics
= devchars
;
500 dev
->do_iotimer
= NULL
;
501 KeInitializeEvent(&dev
->do_devlock
, EVENT_TYPE_SYNC
, TRUE
);
	/*
	 * Vpd is used for disk/tape devices,
	 * but we don't support those. (Yet.)
	 */
509 dev
->do_devobj_ext
= ExAllocatePoolWithTag(NonPagedPool
,
510 sizeof(devobj_extension
), 0);
512 if (dev
->do_devobj_ext
== NULL
) {
513 if (dev
->do_devext
!= NULL
)
514 ExFreePool(dev
->do_devext
);
516 return(STATUS_INSUFFICIENT_RESOURCES
);
519 dev
->do_devobj_ext
->dve_type
= 0;
520 dev
->do_devobj_ext
->dve_size
= sizeof(devobj_extension
);
521 dev
->do_devobj_ext
->dve_devobj
= dev
;
524 * Attach this device to the driver object's list
525 * of devices. Note: this is not the same as attaching
526 * the device to the device stack. The driver's AddDevice
527 * routine must explicitly call IoAddDeviceToDeviceStack()
531 if (drv
->dro_devobj
== NULL
) {
532 drv
->dro_devobj
= dev
;
533 dev
->do_nextdev
= NULL
;
535 dev
->do_nextdev
= drv
->dro_devobj
;
536 drv
->dro_devobj
= dev
;
541 return(STATUS_SUCCESS
);
545 IoDeleteDevice(device_object
*dev
)
552 if (dev
->do_devobj_ext
!= NULL
)
553 ExFreePool(dev
->do_devobj_ext
);
555 if (dev
->do_devext
!= NULL
)
556 ExFreePool(dev
->do_devext
);
558 /* Unlink the device from the driver's device list. */
560 prev
= dev
->do_drvobj
->dro_devobj
;
562 dev
->do_drvobj
->dro_devobj
= dev
->do_nextdev
;
564 while (prev
->do_nextdev
!= dev
)
565 prev
= prev
->do_nextdev
;
566 prev
->do_nextdev
= dev
->do_nextdev
;
574 __stdcall device_object
*
575 IoGetAttachedDevice(device_object
*dev
)
584 while (d
->do_attacheddev
!= NULL
)
585 d
= d
->do_attacheddev
;
590 __stdcall
static irp
*
591 IoBuildSynchronousFsdRequest(uint32_t func
, device_object
*dobj
, void *buf
, uint32_t len
, uint64_t *off
, nt_kevent
*event
, io_status_block
*status
)
595 ip
= IoBuildAsynchronousFsdRequest(func
, dobj
, buf
, len
, off
, status
);
598 ip
->irp_usrevent
= event
;
603 __stdcall
static irp
*
604 IoBuildAsynchronousFsdRequest(uint32_t func
, device_object
*dobj
, void *buf
, uint32_t len
, uint64_t *off
, io_status_block
*status
)
607 io_stack_location
*sl
;
609 ip
= IoAllocateIrp(dobj
->do_stacksize
, TRUE
);
613 ip
->irp_usriostat
= status
;
614 ip
->irp_tail
.irp_overlay
.irp_thread
= NULL
;
616 sl
= IoGetNextIrpStackLocation(ip
);
617 sl
->isl_major
= func
;
621 sl
->isl_devobj
= dobj
;
622 sl
->isl_fileobj
= NULL
;
623 sl
->isl_completionfunc
= NULL
;
625 ip
->irp_userbuf
= buf
;
627 if (dobj
->do_flags
& DO_BUFFERED_IO
) {
628 ip
->irp_assoc
.irp_sysbuf
=
629 ExAllocatePoolWithTag(NonPagedPool
, len
, 0);
630 if (ip
->irp_assoc
.irp_sysbuf
== NULL
) {
634 memcpy( ip
->irp_assoc
.irp_sysbuf
, buf
, len
);
637 if (dobj
->do_flags
& DO_DIRECT_IO
) {
638 ip
->irp_mdl
= IoAllocateMdl(buf
, len
, FALSE
, FALSE
, ip
);
639 if (ip
->irp_mdl
== NULL
) {
640 if (ip
->irp_assoc
.irp_sysbuf
!= NULL
)
641 ExFreePool(ip
->irp_assoc
.irp_sysbuf
);
645 ip
->irp_userbuf
= NULL
;
646 ip
->irp_assoc
.irp_sysbuf
= NULL
;
649 if (func
== IRP_MJ_READ
) {
650 sl
->isl_parameters
.isl_read
.isl_len
= len
;
652 sl
->isl_parameters
.isl_read
.isl_byteoff
= *off
;
654 sl
->isl_parameters
.isl_read
.isl_byteoff
= 0;
657 if (func
== IRP_MJ_WRITE
) {
658 sl
->isl_parameters
.isl_write
.isl_len
= len
;
660 sl
->isl_parameters
.isl_write
.isl_byteoff
= *off
;
662 sl
->isl_parameters
.isl_write
.isl_byteoff
= 0;
668 __stdcall
static irp
*
669 IoBuildDeviceIoControlRequest(iocode
, dobj
, ibuf
, ilen
, obuf
, olen
,
670 isinternal
, event
, status
)
679 io_status_block
*status
;
682 io_stack_location
*sl
;
685 ip
= IoAllocateIrp(dobj
->do_stacksize
, TRUE
);
688 ip
->irp_usrevent
= event
;
689 ip
->irp_usriostat
= status
;
690 ip
->irp_tail
.irp_overlay
.irp_thread
= NULL
;
692 sl
= IoGetNextIrpStackLocation(ip
);
693 sl
->isl_major
= isinternal
== TRUE
?
694 IRP_MJ_INTERNAL_DEVICE_CONTROL
: IRP_MJ_DEVICE_CONTROL
;
698 sl
->isl_devobj
= dobj
;
699 sl
->isl_fileobj
= NULL
;
700 sl
->isl_completionfunc
= NULL
;
701 sl
->isl_parameters
.isl_ioctl
.isl_iocode
= iocode
;
702 sl
->isl_parameters
.isl_ioctl
.isl_ibuflen
= ilen
;
703 sl
->isl_parameters
.isl_ioctl
.isl_obuflen
= olen
;
705 switch(IO_METHOD(iocode
)) {
706 case METHOD_BUFFERED
:
712 ip
->irp_assoc
.irp_sysbuf
=
713 ExAllocatePoolWithTag(NonPagedPool
, buflen
, 0);
714 if (ip
->irp_assoc
.irp_sysbuf
== NULL
) {
719 if (ilen
&& ibuf
!= NULL
) {
720 memcpy( ip
->irp_assoc
.irp_sysbuf
, ibuf
, ilen
);
721 memset((char *)ip
->irp_assoc
.irp_sysbuf
+ ilen
, 0,
724 memset(ip
->irp_assoc
.irp_sysbuf
, 0, ilen
);
725 ip
->irp_userbuf
= obuf
;
727 case METHOD_IN_DIRECT
:
728 case METHOD_OUT_DIRECT
:
729 if (ilen
&& ibuf
!= NULL
) {
730 ip
->irp_assoc
.irp_sysbuf
=
731 ExAllocatePoolWithTag(NonPagedPool
, ilen
, 0);
732 if (ip
->irp_assoc
.irp_sysbuf
== NULL
) {
736 memcpy( ip
->irp_assoc
.irp_sysbuf
, ibuf
, ilen
);
738 if (olen
&& obuf
!= NULL
) {
739 ip
->irp_mdl
= IoAllocateMdl(obuf
, olen
,
742 * Normally we would MmProbeAndLockPages()
743 * here, but we don't have to in our
749 ip
->irp_userbuf
= obuf
;
750 sl
->isl_parameters
.isl_ioctl
.isl_type3ibuf
= ibuf
;
757 * Ideally, we should associate this IRP with the calling
764 __stdcall
static irp
*
771 i
= ExAllocatePoolWithTag(NonPagedPool
, IoSizeOfIrp(stsize
), 0);
775 IoInitializeIrp(i
, IoSizeOfIrp(stsize
), stsize
);
780 __stdcall
static irp
*
781 IoMakeAssociatedIrp(irp
*ip
, uint8_t stsize
)
788 associrp
= IoAllocateIrp(stsize
, FALSE
);
789 if (associrp
== NULL
)
795 mtx_lock(&ntoskrnl_dispatchlock
);
798 associrp
->irp_flags
|= IRP_ASSOCIATED_IRP
;
799 associrp
->irp_tail
.irp_overlay
.irp_thread
=
800 ip
->irp_tail
.irp_overlay
.irp_thread
;
801 associrp
->irp_assoc
.irp_master
= ip
;
804 mtx_unlock(&ntoskrnl_dispatchlock
);
805 #else /* __NetBSD__ */
812 __stdcall
static void
819 __stdcall
static void
820 IoInitializeIrp(irp
*io
, uint16_t psize
, uint8_t ssize
)
822 memset((char *)io
, 0, IoSizeOfIrp(ssize
));
823 io
->irp_size
= psize
;
824 io
->irp_stackcnt
= ssize
;
825 io
->irp_currentstackloc
= ssize
;
826 INIT_LIST_HEAD(&io
->irp_thlist
);
827 io
->irp_tail
.irp_overlay
.irp_csl
=
828 (io_stack_location
*)(io
+ 1) + ssize
;
833 __stdcall
static void
834 IoReuseIrp(irp
*ip
, uint32_t status
)
838 allocflags
= ip
->irp_allocflags
;
839 IoInitializeIrp(ip
, ip
->irp_size
, ip
->irp_stackcnt
);
840 ip
->irp_iostat
.isb_status
= status
;
841 ip
->irp_allocflags
= allocflags
;
847 IoAcquireCancelSpinLock(uint8_t *irql
)
849 KeAcquireSpinLock(&ntoskrnl_cancellock
, irql
);
854 IoReleaseCancelSpinLock(uint8_t irql
)
856 KeReleaseSpinLock(&ntoskrnl_cancellock
, irql
);
865 IoAcquireCancelSpinLock(&ip
->irp_cancelirql
);
866 cfunc
= IoSetCancelRoutine(ip
, NULL
);
867 ip
->irp_cancel
= TRUE
;
868 if (ip
->irp_cancelfunc
== NULL
) {
869 IoReleaseCancelSpinLock(ip
->irp_cancelirql
);
872 MSCALL2(cfunc
, IoGetCurrentIrpStackLocation(ip
)->isl_devobj
, ip
);
877 IofCallDriver(REGARGS2(device_object
*dobj
, irp
*ip
))
879 driver_object
*drvobj
;
880 io_stack_location
*sl
;
882 driver_dispatch disp
;
884 drvobj
= dobj
->do_drvobj
;
886 if (ip
->irp_currentstackloc
<= 0)
887 panic("IoCallDriver(): out of stack locations");
889 IoSetNextIrpStackLocation(ip
);
890 sl
= IoGetCurrentIrpStackLocation(ip
);
892 sl
->isl_devobj
= dobj
;
894 disp
= drvobj
->dro_dispatch
[sl
->isl_major
];
895 status
= MSCALL2(disp
, dobj
, ip
);
901 IofCompleteRequest(REGARGS2(irp
*ip
, uint8_t prioboost
))
906 io_stack_location
*sl
;
909 ip
->irp_pendingreturned
=
910 IoGetCurrentIrpStackLocation(ip
)->isl_ctl
& SL_PENDING_RETURNED
;
911 sl
= (io_stack_location
*)(ip
+ 1);
913 for (i
= ip
->irp_currentstackloc
; i
< (uint32_t)ip
->irp_stackcnt
; i
++) {
914 if (ip
->irp_currentstackloc
< ip
->irp_stackcnt
- 1) {
915 IoSkipCurrentIrpStackLocation(ip
);
916 dobj
= IoGetCurrentIrpStackLocation(ip
)->isl_devobj
;
920 if (sl
[i
].isl_completionfunc
!= NULL
&&
921 ((ip
->irp_iostat
.isb_status
== STATUS_SUCCESS
&&
922 sl
->isl_ctl
& SL_INVOKE_ON_SUCCESS
) ||
923 (ip
->irp_iostat
.isb_status
!= STATUS_SUCCESS
&&
924 sl
->isl_ctl
& SL_INVOKE_ON_ERROR
) ||
925 (ip
->irp_cancel
== TRUE
&&
926 sl
->isl_ctl
& SL_INVOKE_ON_CANCEL
))) {
927 cf
= sl
->isl_completionfunc
;
928 status
= MSCALL3(cf
, dobj
, ip
, sl
->isl_completionctx
);
929 if (status
== STATUS_MORE_PROCESSING_REQUIRED
)
933 if (IoGetCurrentIrpStackLocation(ip
)->isl_ctl
&
935 ip
->irp_pendingreturned
= TRUE
;
938 /* Handle any associated IRPs. */
940 if (ip
->irp_flags
& IRP_ASSOCIATED_IRP
) {
941 uint32_t masterirpcnt
;
945 masterirp
= ip
->irp_assoc
.irp_master
;
946 masterirpcnt
= FASTCALL1(InterlockedDecrement
,
947 &masterirp
->irp_assoc
.irp_irpcnt
);
949 while ((m
= ip
->irp_mdl
) != NULL
) {
950 ip
->irp_mdl
= m
->mdl_next
;
954 if (masterirpcnt
== 0)
955 IoCompleteRequest(masterirp
, IO_NO_INCREMENT
);
959 /* With any luck, these conditions will never arise. */
961 if (ip
->irp_flags
& (IRP_PAGING_IO
|IRP_CLOSE_OPERATION
)) {
962 if (ip
->irp_usriostat
!= NULL
)
963 *ip
->irp_usriostat
= ip
->irp_iostat
;
964 if (ip
->irp_usrevent
!= NULL
)
965 KeSetEvent(ip
->irp_usrevent
, prioboost
, FALSE
);
966 if (ip
->irp_flags
& IRP_PAGING_IO
) {
967 if (ip
->irp_mdl
!= NULL
)
968 IoFreeMdl(ip
->irp_mdl
);
976 __stdcall device_object
*
977 IoAttachDeviceToDeviceStack(device_object
*src
, device_object
*dst
)
979 device_object
*attached
;
987 mtx_lock(&ntoskrnl_dispatchlock
);
990 attached
= IoGetAttachedDevice(dst
);
991 attached
->do_attacheddev
= src
;
992 src
->do_attacheddev
= NULL
;
993 src
->do_stacksize
= attached
->do_stacksize
+ 1;
996 mtx_unlock(&ntoskrnl_dispatchlock
);
997 #else /* __NetBSD__ */
1005 IoDetachDevice(device_object
*topdev
)
1007 device_object
*tail
;
1015 mtx_lock(&ntoskrnl_dispatchlock
);
1018 /* First, break the chain. */
1019 tail
= topdev
->do_attacheddev
;
1022 mtx_unlock(&ntoskrnl_dispatchlock
);
1023 #else /* __NetBSD__ */
1028 topdev
->do_attacheddev
= tail
->do_attacheddev
;
1029 topdev
->do_refcnt
--;
1031 /* Now reduce the stacksize count for the tail objects. */
1033 tail
= topdev
->do_attacheddev
;
1034 while (tail
!= NULL
) {
1035 tail
->do_stacksize
--;
1036 tail
= tail
->do_attacheddev
;
1040 mtx_unlock(&ntoskrnl_dispatchlock
);
1041 #else /* __NetBSD__ */
1048 /* Always called with dispatcher lock held. */
1050 ntoskrnl_wakeup(void *arg
)
1052 nt_dispatch_header
*obj
;
1061 obj
->dh_sigstate
= TRUE
;
1062 e
= obj
->dh_waitlisthead
.nle_flink
;
1063 while (e
!= &obj
->dh_waitlisthead
) {
1064 w
= (wait_block
*)e
;
1065 /* TODO: is this correct? */
1068 ndis_thresume(td
->td_proc
);
1070 ndis_thresume(curproc
);
1073 * For synchronization objects, only wake up
1076 if (obj
->dh_type
== EVENT_TYPE_SYNC
)
1085 ntoskrnl_time(uint64_t *tval
)
1091 TIMEVAL_TO_TIMESPEC(&tv
,&ts
);
1096 *tval
= (uint64_t)ts
.tv_nsec
/ 100 + (uint64_t)ts
.tv_sec
* 10000000 +
1097 (uint64_t)11644473600ULL;
/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signaled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */
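/*
 * A usage sketch of the above, as a Windows driver would typically write
 * it (illustrative only: the wait reason/mode arguments are shown as zero
 * for brevity, and the timeout is a relative duetime in 100ns units,
 * matching the conversion code below).
 */
#if 0
	nt_kevent		done;
	int64_t			timeout;

	KeInitializeEvent(&done, EVENT_TYPE_SYNC, FALSE);
	/* ...another context eventually calls KeSetEvent(&done, 0, FALSE)... */
	timeout = -10000000LL;	/* relative wait of at most one second */
	KeWaitForSingleObject((nt_dispatch_header *)&done, 0, 0, FALSE, &timeout);
#endif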
1155 KeWaitForSingleObject(
1156 nt_dispatch_header
*obj
,
1163 struct thread
*td
= curthread
;
1175 return(STATUS_INVALID_PARAMETER
);
1180 mtx_lock(&ntoskrnl_dispatchlock
);
1184 * See if the object is a mutex. If so, and we already own
1185 * it, then just increment the acquisition count and return.
1187 * For any other kind of object, see if it's already in the
1188 * signalled state, and if it is, just return. If the object
1189 * is marked as a synchronization event, reset the state to
1193 if (obj
->dh_size
== OTYPE_MUTEX
) {
1194 km
= (kmutant
*)obj
;
1195 if (km
->km_ownerthread
== NULL
||
1197 km
->km_ownerthread
== curthread
->td_proc
) {
1199 km
->km_ownerthread
== curproc
) {
1201 obj
->dh_sigstate
= FALSE
;
1202 km
->km_acquirecnt
++;
1204 km
->km_ownerthread
= curthread
->td_proc
;
1206 km
->km_ownerthread
= curproc
;
1210 mtx_unlock(&ntoskrnl_dispatchlock
);
1211 #else /* __NetBSD__ */
1214 return (STATUS_SUCCESS
);
1216 } else if (obj
->dh_sigstate
== TRUE
) {
1217 if (obj
->dh_type
== EVENT_TYPE_SYNC
)
1218 obj
->dh_sigstate
= FALSE
;
1221 mtx_unlock(&ntoskrnl_dispatchlock
);
1222 #else /* __NetBSD__ */
1225 return (STATUS_SUCCESS
);
1233 INSERT_LIST_TAIL((&obj
->dh_waitlisthead
), (&w
.wb_waitlist
));
	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to an absolute offset relative to now in order to use it.
	 * If it's negative, then the duetime is relative and we
	 * just have to convert the units.
	 */
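	/*
	 * Worked example (illustrative only): a relative duetime of
	 * -5000000 means "wait 500ms from now", since 5000000 * 100ns
	 * = 500ms.  The conversion below yields tv.tv_sec = 0 and
	 * tv.tv_usec = 500000 before the value is handed to tvtohz().
	 * A positive (absolute) duetime is first diffed against the
	 * current time obtained from ntoskrnl_time().
	 */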
1244 if (duetime
!= NULL
) {
1246 tv
.tv_sec
= - (*duetime
) / 10000000;
1247 tv
.tv_usec
= (- (*duetime
) / 10) -
1248 (tv
.tv_sec
* 1000000);
1250 ntoskrnl_time(&curtime
);
1251 if (*duetime
< curtime
)
1252 tv
.tv_sec
= tv
.tv_usec
= 0;
1254 tv
.tv_sec
= ((*duetime
) - curtime
) / 10000000;
1255 tv
.tv_usec
= ((*duetime
) - curtime
) / 10 -
1256 (tv
.tv_sec
* 1000000);
1262 error
= ndis_thsuspend(td
->td_proc
, &ntoskrnl_dispatchlock
,
1263 duetime
== NULL
? 0 : tvtohz(&tv
));
1265 error
= ndis_thsuspend(curproc
, &ntoskrnl_dispatchlock
,
1266 duetime
== NULL
? 0 : tvtohz(&tv
));
1269 /* We timed out. Leave the object alone and return status. */
1271 if (error
== EWOULDBLOCK
) {
1272 REMOVE_LIST_ENTRY((&w
.wb_waitlist
));
1274 mtx_unlock(&ntoskrnl_dispatchlock
);
1275 #else /* __NetBSD__ */
1278 return(STATUS_TIMEOUT
);
1282 * Mutexes are always synchronization objects, which means
1283 * if several threads are waiting to acquire it, only one will
1284 * be woken up. If that one is us, and the mutex is up for grabs,
1288 if (obj
->dh_size
== OTYPE_MUTEX
) {
1289 km
= (kmutant
*)obj
;
1290 if (km
->km_ownerthread
== NULL
) {
1292 km
->km_ownerthread
= curthread
->td_proc
;
1294 km
->km_ownerthread
= curproc
;
1296 km
->km_acquirecnt
++;
1300 if (obj
->dh_type
== EVENT_TYPE_SYNC
)
1301 obj
->dh_sigstate
= FALSE
;
1302 REMOVE_LIST_ENTRY((&w
.wb_waitlist
));
1305 mtx_unlock(&ntoskrnl_dispatchlock
);
1306 #else /* __NetBSD__ */
1310 return(STATUS_SUCCESS
);
1313 __stdcall
static uint32_t
1314 KeWaitForMultipleObjects(
1316 nt_dispatch_header
*obj
[],
1322 wait_block
*wb_array
)
1325 struct thread
*td
= curthread
;
1328 wait_block _wb_array
[THREAD_WAIT_OBJECTS
];
1331 int i
, wcnt
= 0, widx
= 0, error
= 0;
1333 struct timespec t1
, t2
;
1335 struct timeval tv1
,tv2
;
1340 if (cnt
> MAX_WAIT_OBJECTS
)
1341 return(STATUS_INVALID_PARAMETER
);
1342 if (cnt
> THREAD_WAIT_OBJECTS
&& wb_array
== NULL
)
1343 return(STATUS_INVALID_PARAMETER
);
1348 mtx_lock(&ntoskrnl_dispatchlock
);
1351 if (wb_array
== NULL
)
1356 /* First pass: see if we can satisfy any waits immediately. */
1358 for (i
= 0; i
< cnt
; i
++) {
1359 if (obj
[i
]->dh_size
== OTYPE_MUTEX
) {
1360 km
= (kmutant
*)obj
[i
];
1361 if (km
->km_ownerthread
== NULL
||
1363 km
->km_ownerthread
== curthread
->td_proc
) {
1365 km
->km_ownerthread
== curproc
) {
1367 obj
[i
]->dh_sigstate
= FALSE
;
1368 km
->km_acquirecnt
++;
1370 km
->km_ownerthread
= curthread
->td_proc
;
1372 km
->km_ownerthread
= curproc
;
1374 if (wtype
== WAITTYPE_ANY
) {
1376 mtx_unlock(&ntoskrnl_dispatchlock
);
1377 #else /* __NetBSD__ */
1380 return (STATUS_WAIT_0
+ i
);
1383 } else if (obj
[i
]->dh_sigstate
== TRUE
) {
1384 if (obj
[i
]->dh_type
== EVENT_TYPE_SYNC
)
1385 obj
[i
]->dh_sigstate
= FALSE
;
1386 if (wtype
== WAITTYPE_ANY
) {
1388 mtx_unlock(&ntoskrnl_dispatchlock
);
1389 #else /* __NetBSD__ */
1392 return (STATUS_WAIT_0
+ i
);
1398 * Second pass: set up wait for anything we can't
1399 * satisfy immediately.
1402 for (i
= 0; i
< cnt
; i
++) {
1403 if (obj
[i
]->dh_sigstate
== TRUE
)
1405 INSERT_LIST_TAIL((&obj
[i
]->dh_waitlisthead
),
1406 (&w
[i
].wb_waitlist
));
1408 w
[i
].wb_kthread
= td
;
1410 w
[i
].wb_object
= obj
[i
];
1414 if (duetime
!= NULL
) {
1416 tv
.tv_sec
= - (*duetime
) / 10000000;
1417 tv
.tv_usec
= (- (*duetime
) / 10) -
1418 (tv
.tv_sec
* 1000000);
1420 ntoskrnl_time(&curtime
);
1421 if (*duetime
< curtime
)
1422 tv
.tv_sec
= tv
.tv_usec
= 0;
1424 tv
.tv_sec
= ((*duetime
) - curtime
) / 10000000;
1425 tv
.tv_usec
= ((*duetime
) - curtime
) / 10 -
1426 (tv
.tv_sec
* 1000000);
1436 TIMEVAL_TO_TIMESPEC(&tv1
,&t1
);
1440 error
= ndis_thsuspend(td
->td_proc
, &ntoskrnl_dispatchlock
,
1441 duetime
== NULL
? 0 : tvtohz(&tv
));
1443 error
= ndis_thsuspend(curproc
, &ntoskrnl_dispatchlock
,
1444 duetime
== NULL
? 0 : tvtohz(&tv
));
1450 TIMEVAL_TO_TIMESPEC(&tv2
,&t2
);
1453 for (i
= 0; i
< cnt
; i
++) {
1454 if (obj
[i
]->dh_size
== OTYPE_MUTEX
) {
1455 km
= (kmutant
*)obj
;
1456 if (km
->km_ownerthread
== NULL
) {
1457 km
->km_ownerthread
=
1463 km
->km_acquirecnt
++;
1466 if (obj
[i
]->dh_sigstate
== TRUE
) {
1468 if (obj
[i
]->dh_type
== EVENT_TYPE_SYNC
)
1469 obj
[i
]->dh_sigstate
= FALSE
;
1470 REMOVE_LIST_ENTRY((&w
[i
].wb_waitlist
));
1475 if (error
|| wtype
== WAITTYPE_ANY
)
1478 if (duetime
!= NULL
) {
1479 tv
.tv_sec
-= (t2
.tv_sec
- t1
.tv_sec
);
1480 tv
.tv_usec
-= (t2
.tv_nsec
- t1
.tv_nsec
) / 1000;
1485 for (i
= 0; i
< cnt
; i
++)
1486 REMOVE_LIST_ENTRY((&w
[i
].wb_waitlist
));
1489 if (error
== EWOULDBLOCK
) {
1491 mtx_unlock(&ntoskrnl_dispatchlock
);
1492 #else /* __NetBSD__ */
1495 return(STATUS_TIMEOUT
);
1498 if (wtype
== WAITTYPE_ANY
&& wcnt
) {
1500 mtx_unlock(&ntoskrnl_dispatchlock
);
1501 #else /* __NetBSD__ */
1504 return(STATUS_WAIT_0
+ widx
);
1508 mtx_unlock(&ntoskrnl_dispatchlock
);
1509 #else /* __NetBSD__ */
1513 return(STATUS_SUCCESS
);
1516 __stdcall
static void
1517 WRITE_REGISTER_USHORT(uint16_t *reg
, uint16_t val
)
1519 bus_space_write_2(NDIS_BUS_SPACE_MEM
, 0x0, (bus_size_t
)reg
, val
);
1523 __stdcall
static uint16_t
1524 READ_REGISTER_USHORT(uint16_t *reg
)
1526 return(bus_space_read_2(NDIS_BUS_SPACE_MEM
, 0x0, (bus_size_t
)reg
));
1529 __stdcall
static void
1530 WRITE_REGISTER_ULONG(uint32_t *reg
, uint32_t val
)
1532 bus_space_write_4(NDIS_BUS_SPACE_MEM
, 0x0, (bus_size_t
)reg
, val
);
1536 __stdcall
static uint32_t
1537 READ_REGISTER_ULONG(uint32_t *reg
)
1539 return(bus_space_read_4(NDIS_BUS_SPACE_MEM
, 0x0, (bus_size_t
)reg
));
1542 __stdcall
static uint8_t
1543 READ_REGISTER_UCHAR(uint8_t *reg
)
1545 return(bus_space_read_1(NDIS_BUS_SPACE_MEM
, 0x0, (bus_size_t
)reg
));
1548 __stdcall
static void
1549 WRITE_REGISTER_UCHAR(uint8_t *reg
, uint8_t val
)
1551 bus_space_write_1(NDIS_BUS_SPACE_MEM
, 0x0, (bus_size_t
)reg
, val
);
1555 __stdcall
static int64_t
1556 _allmul(int64_t a
, int64_t b
)
1561 __stdcall
static int64_t
1562 _alldiv(int64_t a
, int64_t b
)
1567 __stdcall
static int64_t
1568 _allrem(int64_t a
, int64_t b
)
1573 __stdcall
static uint64_t
1574 _aullmul(uint64_t a
, uint64_t b
)
1579 __stdcall
static uint64_t
1580 _aulldiv(uint64_t a
, uint64_t b
)
1585 __stdcall
static uint64_t
1586 _aullrem(uint64_t a
, uint64_t b
)
1591 __regparm
static int64_t
1592 _allshl(int64_t a
, uint8_t b
)
1597 __regparm
static uint64_t
1598 _aullshl(uint64_t a
, uint8_t b
)
1603 __regparm
static int64_t
1604 _allshr(int64_t a
, uint8_t b
)
1609 __regparm
static uint64_t
1610 _aullshr(uint64_t a
, uint8_t b
)
1615 static slist_entry
*
1616 ntoskrnl_pushsl(slist_header
*head
, slist_entry
*entry
)
1618 slist_entry
*oldhead
;
1620 oldhead
= head
->slh_list
.slh_next
;
1621 entry
->sl_next
= head
->slh_list
.slh_next
;
1622 head
->slh_list
.slh_next
= entry
;
1623 head
->slh_list
.slh_depth
++;
1624 head
->slh_list
.slh_seq
++;
1629 static slist_entry
*
1630 ntoskrnl_popsl(slist_header
*head
)
1634 first
= head
->slh_list
.slh_next
;
1635 if (first
!= NULL
) {
1636 head
->slh_list
.slh_next
= first
->sl_next
;
1637 head
->slh_list
.slh_depth
--;
1638 head
->slh_list
.slh_seq
++;
/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() into the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
 */
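/*
 * Illustrative sketch (not compiled in) of why the wrapper matters:
 * the lookaside list initializers below store a function pointer that
 * the Windows driver itself will call, so they must store the wrapped
 * version, and our own callers then go through MSCALL*(), as in
 * ExDeletePagedLookasideList().
 */
#if 0
	lookaside->nll_l.gl_freefunc =
	    ntoskrnl_findwrap((funcptr)ExFreePool);
	/* ...later, called through the Microsoft calling convention: */
	MSCALL1(lookaside->nll_l.gl_freefunc, buf);
#endif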
static funcptr
ntoskrnl_findwrap(funcptr func)
{
	image_patch_table	*patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		if ((funcptr)patch->ipt_func == func)
			return((funcptr)patch->ipt_wrap);
		patch++;
	}

	return(NULL);
}
1670 __stdcall
static void
1671 ExInitializePagedLookasideList(
1672 paged_lookaside_list
*lookaside
,
1673 lookaside_alloc_func
*allocfunc
,
1674 lookaside_free_func
*freefunc
,
1680 memset((char *)lookaside
, 0, sizeof(paged_lookaside_list
));
1682 if (size
< sizeof(slist_entry
))
1683 lookaside
->nll_l
.gl_size
= sizeof(slist_entry
);
1685 lookaside
->nll_l
.gl_size
= size
;
1686 lookaside
->nll_l
.gl_tag
= tag
;
1687 if (allocfunc
== NULL
)
1688 lookaside
->nll_l
.gl_allocfunc
=
1689 ntoskrnl_findwrap((funcptr
)ExAllocatePoolWithTag
);
1691 lookaside
->nll_l
.gl_allocfunc
= allocfunc
;
1693 if (freefunc
== NULL
)
1694 lookaside
->nll_l
.gl_freefunc
=
1695 ntoskrnl_findwrap((funcptr
)ExFreePool
);
1697 lookaside
->nll_l
.gl_freefunc
= freefunc
;
1700 KeInitializeSpinLock(&lookaside
->nll_obsoletelock
);
1703 lookaside
->nll_l
.gl_type
= NonPagedPool
;
1704 lookaside
->nll_l
.gl_depth
= depth
;
1705 lookaside
->nll_l
.gl_maxdepth
= LOOKASIDE_DEPTH
;
1710 __stdcall
static void
1711 ExDeletePagedLookasideList(paged_lookaside_list
*lookaside
)
1714 __stdcall
void (*freefunc
)(void *);
1716 freefunc
= lookaside
->nll_l
.gl_freefunc
;
1717 while((buf
= ntoskrnl_popsl(&lookaside
->nll_l
.gl_listhead
)) != NULL
)
1718 MSCALL1(freefunc
, buf
);
1723 __stdcall
static void
1724 ExInitializeNPagedLookasideList(
1725 npaged_lookaside_list
*lookaside
,
1726 lookaside_alloc_func
*allocfunc
,
1727 lookaside_free_func
*freefunc
,
1733 memset((char *)lookaside
, 0, sizeof(npaged_lookaside_list
));
1735 if (size
< sizeof(slist_entry
))
1736 lookaside
->nll_l
.gl_size
= sizeof(slist_entry
);
1738 lookaside
->nll_l
.gl_size
= size
;
1739 lookaside
->nll_l
.gl_tag
= tag
;
1740 if (allocfunc
== NULL
)
1741 lookaside
->nll_l
.gl_allocfunc
=
1742 ntoskrnl_findwrap((funcptr
)ExAllocatePoolWithTag
);
1744 lookaside
->nll_l
.gl_allocfunc
= allocfunc
;
1746 if (freefunc
== NULL
)
1747 lookaside
->nll_l
.gl_freefunc
=
1748 ntoskrnl_findwrap((funcptr
)ExFreePool
);
1750 lookaside
->nll_l
.gl_freefunc
= freefunc
;
1753 KeInitializeSpinLock(&lookaside
->nll_obsoletelock
);
1756 lookaside
->nll_l
.gl_type
= NonPagedPool
;
1757 lookaside
->nll_l
.gl_depth
= depth
;
1758 lookaside
->nll_l
.gl_maxdepth
= LOOKASIDE_DEPTH
;
1763 __stdcall
static void
1764 ExDeleteNPagedLookasideList(npaged_lookaside_list
*lookaside
)
1767 __stdcall
void (*freefunc
)(void *);
1769 freefunc
= lookaside
->nll_l
.gl_freefunc
;
1770 while((buf
= ntoskrnl_popsl(&lookaside
->nll_l
.gl_listhead
)) != NULL
)
1771 MSCALL1(freefunc
, buf
);
/*
 * Note: the interlocked slist push and pop routines are
 * declared to be _fastcall in Windows. gcc 3.4 is supposed
 * to have support for this calling convention, however we
 * don't have that version available yet, so we kludge things
 * up using __regparm__(3) and some argument shuffling.
 */
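/*
 * Hedged example of the kludge described above: a _fastcall routine is
 * declared with the REGARGS*() macros and must be invoked through the
 * matching FASTCALL*() macro so its arguments land where the callee
 * expects them.  This mirrors InterlockedPushEntrySList() below.
 */
#if 0
	slist_entry	*old;

	old = (slist_entry *)FASTCALL3(ExInterlockedPushEntrySList,
	    head, entry, &ntoskrnl_global);
#endif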
1784 __fastcall
static slist_entry
*
1785 InterlockedPushEntrySList(REGARGS2(slist_header
*head
, slist_entry
*entry
))
1787 slist_entry
*oldhead
;
1789 oldhead
= (slist_entry
*)FASTCALL3(ExInterlockedPushEntrySList
,
1790 head
, entry
, &ntoskrnl_global
);
1795 __fastcall
static slist_entry
*
1796 InterlockedPopEntrySList(REGARGS1(slist_header
*head
))
1800 first
= (slist_entry
*)FASTCALL2(ExInterlockedPopEntrySList
,
1801 head
, &ntoskrnl_global
);
1806 __fastcall
static slist_entry
*
1807 ExInterlockedPushEntrySList(REGARGS2(slist_header
*head
,
1808 slist_entry
*entry
), kspin_lock
*lock
)
1810 slist_entry
*oldhead
;
1813 KeAcquireSpinLock(lock
, &irql
);
1814 oldhead
= ntoskrnl_pushsl(head
, entry
);
1815 KeReleaseSpinLock(lock
, irql
);
1820 __fastcall
static slist_entry
*
1821 ExInterlockedPopEntrySList(REGARGS2(slist_header
*head
, kspin_lock
*lock
))
1826 KeAcquireSpinLock(lock
, &irql
);
1827 first
= ntoskrnl_popsl(head
);
1828 KeReleaseSpinLock(lock
, irql
);
1833 __stdcall
static uint16_t
1834 ExQueryDepthSList(slist_header
*head
)
1839 KeAcquireSpinLock(&ntoskrnl_global
, &irql
);
1840 depth
= head
->slh_list
.slh_depth
;
1841 KeReleaseSpinLock(&ntoskrnl_global
, irql
);
/* TODO: Make sure that LOCKDEBUG isn't defined, otherwise a "struct simplelock" will
 * TODO: be more than 4 bytes. I'm using a kspin_lock as a simplelock, and the
 * TODO: kspin lock is 4 bytes, so this is OK as long as LOCKDEBUG isn't defined.
 */

/*
 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
 * and KefReleaseSpinLockFromDpcLevel() routines appear to be analogous
 * to splnet()/splx() in their use. We can't create a new mutex
 * lock here because there is no complementary KeFreeSpinLock()
 * function. Instead, we grab a mutex from the mutex pool.
 */
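/*
 * For orientation, a typical driver-side sequence using these
 * primitives (illustrative only); the acquire/release pair brackets a
 * short critical section at raised IRQL, just as the slist helpers
 * above do with ntoskrnl_global.
 */
#if 0
	kspin_lock	lck;
	uint8_t		irql;

	KeInitializeSpinLock(&lck);
	KeAcquireSpinLock(&lck, &irql);
	/* ...touch data shared with a DPC... */
	KeReleaseSpinLock(&lck, irql);
#endif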
1859 KeInitializeSpinLock(kspin_lock
*lock
)
1863 #else /* __NetBSD__ */
1864 simple_lock_init((struct simplelock
*)lock
);
1872 KefAcquireSpinLockAtDpcLevel(REGARGS1(kspin_lock
*lock
))
1875 while (atomic_cmpset_acq_int((volatile u_int
*)lock
, 0, 1) == 0)
1877 #else /* __NetBSD__ */
1878 simple_lock((struct simplelock
*)lock
);
1885 KefReleaseSpinLockFromDpcLevel(REGARGS1(kspin_lock
*lock
))
1888 atomic_store_rel_int((volatile u_int
*)lock
, 0);
1889 #else /* __NetBSD__ */
1890 simple_unlock((struct simplelock
*)lock
);
1896 KeAcquireSpinLockRaiseToDpc(kspin_lock
*lock
)
1900 if (KeGetCurrentIrql() > DISPATCH_LEVEL
)
1901 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
1903 oldirql
= KeRaiseIrql(DISPATCH_LEVEL
);
1904 KeAcquireSpinLockAtDpcLevel(lock
);
1910 KeAcquireSpinLockAtDpcLevel(kspin_lock
*lock
)
1912 while (atomic_swap_uint((volatile u_int
*)lock
, 1) == 1)
1919 KeReleaseSpinLockFromDpcLevel(kspin_lock
*lock
)
1921 *(volatile u_int
*)lock
= 0;
1925 #endif /* __i386__ */
1927 __fastcall
uintptr_t
1928 InterlockedExchange(REGARGS2(volatile uint32_t *dst
, uintptr_t val
))
1933 KeAcquireSpinLock(&ntoskrnl_global
, &irql
);
1936 KeReleaseSpinLock(&ntoskrnl_global
, irql
);
1941 __fastcall
static uint32_t
1942 InterlockedIncrement(REGARGS1(volatile uint32_t *addend
))
1944 atomic_inc_32(addend
);
1948 __fastcall
static uint32_t
1949 InterlockedDecrement(REGARGS1(volatile uint32_t *addend
))
1951 atomic_dec_32(addend
);
1955 __fastcall
static void
1956 ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend
, uint32_t inc
))
1960 KeAcquireSpinLock(&ntoskrnl_global
, &irql
);
1962 KeReleaseSpinLock(&ntoskrnl_global
, irql
);
1971 uint8_t secondarybuf
,
1972 uint8_t chargequota
,
1978 if (MmSizeOfMdl(vaddr
, len
) > MDL_ZONE_SIZE
)
1979 m
= ExAllocatePoolWithTag(NonPagedPool
,
1980 MmSizeOfMdl(vaddr
, len
), 0);
1983 m
= uma_zalloc(mdl_zone
, M_NOWAIT
| M_ZERO
);
1985 m
= pool_get(&mdl_pool
, PR_WAITOK
);
1993 MmInitializeMdl(m
, vaddr
, len
);
	/*
	 * MmInitializeMdl() clears the flags field, so we
	 * have to set this here. If the MDL came from the
	 * MDL UMA zone, tag it so we can release it to
	 * the right place later.
	 */
2002 m
->mdl_flags
= MDL_ZONE_ALLOCED
;
2004 if (iopkt
!= NULL
) {
2005 if (secondarybuf
== TRUE
) {
2007 last
= iopkt
->irp_mdl
;
2008 while (last
->mdl_next
!= NULL
)
2009 last
= last
->mdl_next
;
2012 if (iopkt
->irp_mdl
!= NULL
)
2013 panic("leaking an MDL in IoAllocateMdl()");
2027 if (m
->mdl_flags
& MDL_ZONE_ALLOCED
)
2029 uma_zfree(mdl_zone
, m
);
2031 pool_put(&mdl_pool
, m
);
2039 __stdcall
static uint32_t
2040 MmSizeOfMdl(void *vaddr
, size_t len
)
2044 l
= sizeof(struct mdl
) +
2045 (sizeof(vm_offset_t
*) * SPAN_PAGES(vaddr
, len
));
/*
 * The Microsoft documentation says this routine fills in the
 * page array of an MDL with the _physical_ page addresses that
 * comprise the buffer, but we don't really want to do that here.
 * Instead, we just fill in the page array with the kernel virtual
 * addresses of the buffers.
 */
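/*
 * Worked example (illustrative only): for a buffer spanning three pages
 * that starts at kernel VA 'va', the loop below is meant to record va,
 * va + PAGE_SIZE and va + 2 * PAGE_SIZE in the array returned by
 * MmGetMdlPfnArray(), rather than the physical frame numbers a real
 * Windows kernel would record there.
 */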
2057 __stdcall
static void
2058 MmBuildMdlForNonPagedPool(mdl
*m
)
2060 vm_offset_t
*mdl_pages
;
2063 pagecnt
= SPAN_PAGES(m
->mdl_byteoffset
, m
->mdl_bytecount
);
2065 if (pagecnt
> (m
->mdl_size
- sizeof(mdl
)) / sizeof(vm_offset_t
*))
2066 panic("not enough pages in MDL to describe buffer");
2068 mdl_pages
= MmGetMdlPfnArray(m
);
2070 for (i
= 0; i
< pagecnt
; i
++)
2071 *mdl_pages
= (vm_offset_t
)m
->mdl_startva
+ (i
* PAGE_SIZE
);
2073 m
->mdl_flags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
2074 m
->mdl_mappedsystemva
= MmGetMdlVirtualAddress(m
);
2079 __stdcall
static void *
2084 buf
->mdl_flags
|= MDL_MAPPED_TO_SYSTEM_VA
;
2085 return(MmGetMdlVirtualAddress(buf
));
2088 __stdcall
static void *
2089 MmMapLockedPagesSpecifyCache(
2097 return(MmMapLockedPages(buf
, accessmode
));
2100 __stdcall
static void
2105 buf
->mdl_flags
&= ~MDL_MAPPED_TO_SYSTEM_VA
;
2109 __stdcall
static size_t
2110 RtlCompareMemory(const void *s1
, const void *s2
, size_t len
)
2112 size_t i
, total
= 0;
2115 m1
= __DECONST(char *, s1
);
2116 m2
= __DECONST(char *, s2
);
2118 for (i
= 0; i
< len
; i
++) {
2125 __stdcall
static void
2126 RtlInitAnsiString(ndis_ansi_string
*dst
, char *src
)
2128 ndis_ansi_string
*a
;
2134 a
->nas_len
= a
->nas_maxlen
= 0;
2138 a
->nas_len
= a
->nas_maxlen
= strlen(src
);
2144 __stdcall
static void
2145 RtlInitUnicodeString(ndis_unicode_string
*dst
, uint16_t *src
)
2147 ndis_unicode_string
*u
;
2154 u
->us_len
= u
->us_maxlen
= 0;
2161 u
->us_len
= u
->us_maxlen
= i
* 2;
2167 __stdcall ndis_status
2168 RtlUnicodeStringToInteger(ndis_unicode_string
*ustr
, uint32_t base
, uint32_t *val
)
2175 uchr
= ustr
->us_buf
;
2177 memset(abuf
, 0, sizeof(abuf
));
2179 if ((char)((*uchr
) & 0xFF) == '-') {
2183 } else if ((char)((*uchr
) & 0xFF) == '+') {
2190 if ((char)((*uchr
) & 0xFF) == 'b') {
2194 } else if ((char)((*uchr
) & 0xFF) == 'o') {
2198 } else if ((char)((*uchr
) & 0xFF) == 'x') {
2212 ndis_unicode_to_ascii(uchr
, len
, &astr
);
2213 *val
= strtoul(abuf
, NULL
, base
);
2215 return(NDIS_STATUS_SUCCESS
);
2218 __stdcall
static void
2219 RtlFreeUnicodeString(ndis_unicode_string
*ustr
)
2221 if (ustr
->us_buf
== NULL
)
2223 free(ustr
->us_buf
, M_DEVBUF
);
2224 ustr
->us_buf
= NULL
;
2228 __stdcall
static void
2229 RtlFreeAnsiString(ndis_ansi_string
*astr
)
2231 if (astr
->nas_buf
== NULL
)
2233 free(astr
->nas_buf
, M_DEVBUF
);
2234 astr
->nas_buf
= NULL
;
2239 atoi(const char *str
)
2242 return (int)strtol(str
, (char **)NULL
, 10);
2246 for (n
= 0; *str
&& *str
>= '0' && *str
<= '9'; str
++)
2247 n
= n
* 10 + *str
- '0';
2254 atol(const char *str
)
2257 return strtol(str
, (char **)NULL
, 10);
2261 for (n
= 0; *str
&& *str
>= '0' && *str
<= '9'; str
++)
2262 n
= n
* 10 + *str
- '0';
2270 * stolen from ./netipsec/key.c
2275 void srandom(int arg
) {return;}
2285 srandom(tv
.tv_usec
);
2286 return((int)random());
2290 srand(unsigned int seed
)
2296 __stdcall
static uint8_t
2297 IoIsWdmVersionAvailable(uint8_t major
, uint8_t minor
)
2299 if (major
== WDM_MAJOR
&& minor
== WDM_MINOR_WINXP
)
2304 __stdcall
static ndis_status
2305 IoGetDeviceProperty(
2306 device_object
*devobj
,
2315 drv
= devobj
->do_drvobj
;
2318 case DEVPROP_DRIVER_KEYNAME
:
2320 *name
= drv
->dro_drivername
.us_buf
;
2321 *reslen
= drv
->dro_drivername
.us_len
;
2324 return(STATUS_INVALID_PARAMETER_2
);
2328 return(STATUS_SUCCESS
);
2331 __stdcall
static void
2336 INIT_LIST_HEAD((&kmutex
->km_header
.dh_waitlisthead
));
2337 kmutex
->km_abandoned
= FALSE
;
2338 kmutex
->km_apcdisable
= 1;
2339 kmutex
->km_header
.dh_sigstate
= TRUE
;
2340 kmutex
->km_header
.dh_type
= EVENT_TYPE_SYNC
;
2341 kmutex
->km_header
.dh_size
= OTYPE_MUTEX
;
2342 kmutex
->km_acquirecnt
= 0;
2343 kmutex
->km_ownerthread
= NULL
;
2347 __stdcall
static uint32_t
2359 mtx_lock(&ntoskrnl_dispatchlock
);
2363 if (kmutex
->km_ownerthread
!= curthread
->td_proc
) {
2365 if (kmutex
->km_ownerthread
!= curproc
) {
2368 mtx_unlock(&ntoskrnl_dispatchlock
);
2369 #else /* __NetBSD__ */
2372 return(STATUS_MUTANT_NOT_OWNED
);
2374 kmutex
->km_acquirecnt
--;
2375 if (kmutex
->km_acquirecnt
== 0) {
2376 kmutex
->km_ownerthread
= NULL
;
2377 ntoskrnl_wakeup(&kmutex
->km_header
);
2381 mtx_unlock(&ntoskrnl_dispatchlock
);
2382 #else /* __NetBSD__ */
2386 return(kmutex
->km_acquirecnt
);
2389 __stdcall
static uint32_t
2390 KeReadStateMutex(kmutant
*kmutex
)
2392 return(kmutex
->km_header
.dh_sigstate
);
2396 KeInitializeEvent(nt_kevent
*kevent
, uint32_t type
, uint8_t state
)
2398 INIT_LIST_HEAD((&kevent
->k_header
.dh_waitlisthead
));
2399 kevent
->k_header
.dh_sigstate
= state
;
2400 kevent
->k_header
.dh_type
= type
;
2401 kevent
->k_header
.dh_size
= OTYPE_EVENT
;
2406 KeResetEvent(nt_kevent
*kevent
)
2416 mtx_lock(&ntoskrnl_dispatchlock
);
2419 prevstate
= kevent
->k_header
.dh_sigstate
;
2420 kevent
->k_header
.dh_sigstate
= FALSE
;
2423 mtx_unlock(&ntoskrnl_dispatchlock
);
2424 #else /* __NetBSD__ */
2445 mtx_lock(&ntoskrnl_dispatchlock
);
2448 prevstate
= kevent
->k_header
.dh_sigstate
;
2449 ntoskrnl_wakeup(&kevent
->k_header
);
2452 mtx_unlock(&ntoskrnl_dispatchlock
);
2453 #else /* __NetBSD__ */
2461 KeClearEvent(nt_kevent
*kevent
)
2463 kevent
->k_header
.dh_sigstate
= FALSE
;
2468 KeReadStateEvent(nt_kevent
*kevent
)
2470 return(kevent
->k_header
.dh_sigstate
);
2473 __stdcall
static ndis_status
2474 ObReferenceObjectByHandle(
2484 nr
= malloc(sizeof(nt_objref
), M_DEVBUF
, M_NOWAIT
|M_ZERO
);
2486 return(NDIS_STATUS_FAILURE
);
2488 INIT_LIST_HEAD((&nr
->no_dh
.dh_waitlisthead
));
2489 nr
->no_obj
= handle
;
2490 nr
->no_dh
.dh_size
= OTYPE_THREAD
;
2491 TAILQ_INSERT_TAIL(&ntoskrnl_reflist
, nr
, link
);
2494 return(NDIS_STATUS_SUCCESS
);
2497 __fastcall
static void
2498 ObfDereferenceObject(REGARGS1(void *object
))
2503 TAILQ_REMOVE(&ntoskrnl_reflist
, nr
, link
);
2509 __stdcall
static uint32_t
2510 ZwClose(ndis_handle handle
)
2512 return(STATUS_SUCCESS
);
2516 * This is here just in case the thread returns without calling
2517 * PsTerminateSystemThread().
2520 ntoskrnl_thrfunc(void *arg
)
2522 thread_context
*thrctx
;
2523 __stdcall
uint32_t (*tfunc
)(void *);
2528 tfunc
= thrctx
->tc_thrfunc
;
2529 tctx
= thrctx
->tc_thrctx
;
2530 free(thrctx
, M_TEMP
);
2532 rval
= MSCALL1(tfunc
, tctx
);
2534 PsTerminateSystemThread(rval
);
2535 return; /* notreached */
2538 __stdcall
static ndis_status
2539 PsCreateSystemThread(
2540 ndis_handle
*handle
,
2543 ndis_handle phandle
,
2553 tc
= malloc(sizeof(thread_context
), M_TEMP
, M_NOWAIT
);
2555 return(NDIS_STATUS_FAILURE
);
2557 tc
->tc_thrctx
= thrctx
;
2558 tc
->tc_thrfunc
= thrfunc
;
2560 sprintf(tname
, "windows kthread %d", ntoskrnl_kth
);
2562 error
= kthread_create(ntoskrnl_thrfunc
, tc
, &p
,
2563 RFHIGHPID
, NDIS_KSTACK_PAGES
, tname
);
2565 /* TODO: Provide a larger stack for these threads (NDIS_KSTACK_PAGES) */
2566 error
= ndis_kthread_create(ntoskrnl_thrfunc
, tc
, &p
, NULL
, 0, tname
);
/*
 * In Windows, the exit of a thread is an event that you're allowed
 * to wait on, assuming you've obtained a reference to the thread using
 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 * simulate this behavior is to register each thread we create in a
 * reference list, and if someone holds a reference to us, we poke
 * them when we exit.
 */
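/*
 * The driver-visible pattern being emulated, sketched for reference
 * (illustrative only; argument names are placeholders): spawn a thread,
 * turn the handle into an object pointer, and later wait for the
 * thread-exit wakeup that PsTerminateSystemThread() issues below.
 */
#if 0
	ndis_handle		th;
	void			*thobj;

	PsCreateSystemThread(&th, 0, NULL, NULL, NULL, my_thread_func, my_ctx);
	ObReferenceObjectByHandle(th, 0, NULL, 0, &thobj, NULL);
	ZwClose(th);
	/* ...at shutdown time... */
	KeWaitForSingleObject(thobj, 0, 0, FALSE, NULL);
	ObfDereferenceObject(thobj);
#endif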
2583 __stdcall
static ndis_status
2584 PsTerminateSystemThread(ndis_status status
)
2586 struct nt_objref
*nr
;
2594 mtx_lock(&ntoskrnl_dispatchlock
);
2597 TAILQ_FOREACH(nr
, &ntoskrnl_reflist
, link
) {
2599 if (nr
->no_obj
!= curthread
->td_proc
)
2601 if (nr
->no_obj
!= curproc
)
2604 ntoskrnl_wakeup(&nr
->no_dh
);
2609 mtx_unlock(&ntoskrnl_dispatchlock
);
2610 #else /* __NetBSD__ */
2617 #if __FreeBSD_version < 502113
2620 #endif /* __FreeBSD__ */
2622 return(0); /* notreached */
2626 DbgPrint(char *fmt
, ...)
2631 //va_start(ap, fmt);
2635 return(STATUS_SUCCESS
);
2638 __stdcall
static void
2641 #if defined(__FreeBSD__) && __FreeBSD_version < 502113
2642 Debugger("DbgBreakPoint(): breakpoint");
2643 #elif defined(__FreeBSD__) && __FreeBSD_version >= 502113
2644 kdb_enter("DbgBreakPoint(): breakpoint");
2645 #else /* NetBSD case */
2646 ; /* TODO Search how to go into debugger without panic */
2651 ntoskrnl_timercall(void *arg
)
2666 mtx_lock(&ntoskrnl_dispatchlock
);
2671 timer
->k_header
.dh_inserted
= FALSE
;
	/*
	 * If this is a periodic timer, re-arm it
	 * so it will fire again. We do this before
	 * calling any deferred procedure calls because
	 * it's possible the DPC might cancel the timer,
	 * in which case it would be wrong for us to
	 * re-arm it again afterwards.
	 */
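	/*
	 * For orientation (illustrative only): a driver arms such a
	 * periodic timer with KeSetTimerEx(), e.g. first expiry after
	 * 100ms and then every 50ms, with the DPC queued on each expiry:
	 *
	 *	KeInitializeTimerEx(&tmr, EVENT_TYPE_NOTIFY);
	 *	KeInitializeDpc(&dpc, my_dpc_func, my_ctx);
	 *	KeSetTimerEx(&tmr, -1000000LL, 50, &dpc);
	 */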
2682 if (timer
->k_period
) {
2684 tv
.tv_usec
= timer
->k_period
* 1000;
2685 timer
->k_header
.dh_inserted
= TRUE
;
2687 timer
->k_handle
= timeout(ntoskrnl_timercall
,
2688 timer
, tvtohz(&tv
));
2689 #else /* __NetBSD__ */
2690 callout_reset(timer
->k_handle
, tvtohz(&tv
), ntoskrnl_timercall
, timer
);
2691 #endif /* __NetBSD__ */
2694 if (timer
->k_dpc
!= NULL
)
2695 KeInsertQueueDpc(timer
->k_dpc
, NULL
, NULL
);
2697 ntoskrnl_wakeup(&timer
->k_header
);
2700 mtx_unlock(&ntoskrnl_dispatchlock
);
2701 #else /* __NetBSD__ */
2713 KeInitializeTimer(ktimer
*timer
)
2718 KeInitializeTimerEx(timer
, EVENT_TYPE_NOTIFY
);
2724 KeInitializeTimerEx(ktimer
*timer
, uint32_t type
)
2729 INIT_LIST_HEAD((&timer
->k_header
.dh_waitlisthead
));
2730 timer
->k_header
.dh_sigstate
= FALSE
;
2731 timer
->k_header
.dh_inserted
= FALSE
;
2732 timer
->k_header
.dh_type
= type
;
2733 timer
->k_header
.dh_size
= OTYPE_TIMER
;
2735 callout_handle_init(&timer
->k_handle
);
2737 callout_init(timer
->k_handle
, 0);
/*
 * This is a wrapper for Windows deferred procedure calls that
 * have been placed on an NDIS thread work queue. We need it
 * since the DPC could be a _stdcall function. Also, as far as
 * I can tell, deferred procedure calls must run at DISPATCH_LEVEL.
 */
2750 ntoskrnl_run_dpc(void *arg
)
2752 __stdcall kdpc_func dpcfunc
;
2757 dpcfunc
= dpc
->k_deferedfunc
;
2758 irql
= KeRaiseIrql(DISPATCH_LEVEL
);
2759 MSCALL4(dpcfunc
, dpc
, dpc
->k_deferredctx
,
2760 dpc
->k_sysarg1
, dpc
->k_sysarg2
);
2767 KeInitializeDpc(kdpc
*dpc
, void *dpcfunc
, void *dpcctx
)
2773 dpc
->k_deferedfunc
= dpcfunc
;
2774 dpc
->k_deferredctx
= dpcctx
;
2780 KeInsertQueueDpc(kdpc
*dpc
, void *sysarg1
, void *sysarg2
)
2782 dpc
->k_sysarg1
= sysarg1
;
2783 dpc
->k_sysarg2
= sysarg2
;
2785 if (ndis_sched(ntoskrnl_run_dpc
, dpc
, NDIS_SWI
))
2792 KeRemoveQueueDpc(kdpc
*dpc
)
2794 if (ndis_unsched(ntoskrnl_run_dpc
, dpc
, NDIS_SWI
))
2801 KeSetTimerEx(ktimer
*timer
, int64_t duetime
, uint32_t period
, kdpc
*dpc
)
2816 mtx_lock(&ntoskrnl_dispatchlock
);
2819 if (timer
->k_header
.dh_inserted
== TRUE
) {
2821 untimeout(ntoskrnl_timercall
, timer
, timer
->k_handle
);
2822 #else /* __NetBSD__ */
2823 callout_stop(timer
->k_handle
);
2825 timer
->k_header
.dh_inserted
= FALSE
;
2830 timer
->k_duetime
= duetime
;
2831 timer
->k_period
= period
;
2832 timer
->k_header
.dh_sigstate
= FALSE
;
2836 tv
.tv_sec
= - (duetime
) / 10000000;
2837 tv
.tv_usec
= (- (duetime
) / 10) -
2838 (tv
.tv_sec
* 1000000);
2840 ntoskrnl_time(&curtime
);
2841 if (duetime
< curtime
)
2842 tv
.tv_sec
= tv
.tv_usec
= 0;
2844 tv
.tv_sec
= ((duetime
) - curtime
) / 10000000;
2845 tv
.tv_usec
= ((duetime
) - curtime
) / 10 -
2846 (tv
.tv_sec
* 1000000);
2850 timer
->k_header
.dh_inserted
= TRUE
;
2852 timer
->k_handle
= timeout(ntoskrnl_timercall
, timer
, tvtohz(&tv
));
2854 callout_reset(timer
->k_handle
, tvtohz(&tv
), ntoskrnl_timercall
, timer
);
2858 mtx_unlock(&ntoskrnl_dispatchlock
);
2859 #else /* __NetBSD__ */
2867 KeSetTimer(ktimer
*timer
, int64_t duetime
, kdpc
*dpc
)
2869 return (KeSetTimerEx(timer
, duetime
, 0, dpc
));
2873 KeCancelTimer(ktimer
*timer
)
2886 mtx_lock(&ntoskrnl_dispatchlock
);
2889 if (timer
->k_header
.dh_inserted
== TRUE
) {
2891 untimeout(ntoskrnl_timercall
, timer
, timer
->k_handle
);
2892 #else /* __NetBSD__ */
2893 callout_stop(timer
->k_handle
);
2897 pending
= KeRemoveQueueDpc(timer
->k_dpc
);
2900 mtx_unlock(&ntoskrnl_dispatchlock
);
2901 #else /* __NetBSD__ */
2909 KeReadStateTimer(ktimer
*timer
)
2911 return(timer
->k_header
.dh_sigstate
);
2914 __stdcall
static void
2917 printf ("ntoskrnl dummy called...\n");
image_patch_table ntoskrnl_functbl[] = {
	IMPORT_FUNC(RtlCompareMemory),
	IMPORT_FUNC(RtlEqualUnicodeString),
	IMPORT_FUNC(RtlCopyUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToAnsiString),
	IMPORT_FUNC(RtlAnsiStringToUnicodeString),
	IMPORT_FUNC(RtlInitAnsiString),
	IMPORT_FUNC_MAP(RtlInitString, RtlInitAnsiString),
	IMPORT_FUNC(RtlInitUnicodeString),
	IMPORT_FUNC(RtlFreeAnsiString),
	IMPORT_FUNC(RtlFreeUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToInteger),
	IMPORT_FUNC(sprintf),
	IMPORT_FUNC(vsprintf),
	IMPORT_FUNC_MAP(_snprintf, snprintf),
	IMPORT_FUNC_MAP(_vsnprintf, vsnprintf),
	IMPORT_FUNC(DbgPrint),
	IMPORT_FUNC(DbgBreakPoint),
	IMPORT_FUNC(strncmp),
	IMPORT_FUNC(strcmp),
	IMPORT_FUNC(strncpy),
	IMPORT_FUNC(strcpy),
	IMPORT_FUNC(strlen),
	IMPORT_FUNC(memcpy),
	IMPORT_FUNC_MAP(memmove, ntoskrnl_memset),
	IMPORT_FUNC_MAP(memset, ntoskrnl_memset),
	IMPORT_FUNC(IoAllocateDriverObjectExtension),
	IMPORT_FUNC(IoGetDriverObjectExtension),
	IMPORT_FUNC(IofCallDriver),
	IMPORT_FUNC(IofCompleteRequest),
	IMPORT_FUNC(IoAcquireCancelSpinLock),
	IMPORT_FUNC(IoReleaseCancelSpinLock),
	IMPORT_FUNC(IoCancelIrp),
	IMPORT_FUNC(IoCreateDevice),
	IMPORT_FUNC(IoDeleteDevice),
	IMPORT_FUNC(IoGetAttachedDevice),
	IMPORT_FUNC(IoAttachDeviceToDeviceStack),
	IMPORT_FUNC(IoDetachDevice),
	IMPORT_FUNC(IoBuildSynchronousFsdRequest),
	IMPORT_FUNC(IoBuildAsynchronousFsdRequest),
	IMPORT_FUNC(IoBuildDeviceIoControlRequest),
	IMPORT_FUNC(IoAllocateIrp),
	IMPORT_FUNC(IoReuseIrp),
	IMPORT_FUNC(IoMakeAssociatedIrp),
	IMPORT_FUNC(IoFreeIrp),
	IMPORT_FUNC(IoInitializeIrp),
	IMPORT_FUNC(KeWaitForSingleObject),
	IMPORT_FUNC(KeWaitForMultipleObjects),
	IMPORT_FUNC(_allmul),
	IMPORT_FUNC(_alldiv),
	IMPORT_FUNC(_allrem),
	IMPORT_FUNC(_allshr),
	IMPORT_FUNC(_allshl),
	IMPORT_FUNC(_aullmul),
	IMPORT_FUNC(_aulldiv),
	IMPORT_FUNC(_aullrem),
	IMPORT_FUNC(_aullshr),
	IMPORT_FUNC(_aullshl),
	IMPORT_FUNC(WRITE_REGISTER_USHORT),
	IMPORT_FUNC(READ_REGISTER_USHORT),
	IMPORT_FUNC(WRITE_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_UCHAR),
	IMPORT_FUNC(WRITE_REGISTER_UCHAR),
	IMPORT_FUNC(ExInitializePagedLookasideList),
	IMPORT_FUNC(ExDeletePagedLookasideList),
	IMPORT_FUNC(ExInitializeNPagedLookasideList),
	IMPORT_FUNC(ExDeleteNPagedLookasideList),
	IMPORT_FUNC(InterlockedPopEntrySList),
	IMPORT_FUNC(InterlockedPushEntrySList),
	IMPORT_FUNC(ExQueryDepthSList),
	IMPORT_FUNC_MAP(ExpInterlockedPopEntrySList, InterlockedPopEntrySList),
	IMPORT_FUNC_MAP(ExpInterlockedPushEntrySList,
	    InterlockedPushEntrySList),
	IMPORT_FUNC(ExInterlockedPopEntrySList),
	IMPORT_FUNC(ExInterlockedPushEntrySList),
	IMPORT_FUNC(ExAllocatePoolWithTag),
	IMPORT_FUNC(ExFreePool),
	IMPORT_FUNC(KefAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KefReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC(KeAcquireSpinLockRaiseToDpc),
	/*
	 * For AMD64, we can get away with just mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
	 * because the calling conventions end up being the same.
	 * On i386, we have to be careful because KfAcquireSpinLock()
	 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
	 */
	IMPORT_FUNC(KeAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KeReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock),
	IMPORT_FUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock),
	IMPORT_FUNC(InterlockedIncrement),
	IMPORT_FUNC(InterlockedDecrement),
	IMPORT_FUNC(ExInterlockedAddLargeStatistic),
	IMPORT_FUNC(IoAllocateMdl),
	IMPORT_FUNC(IoFreeMdl),
	IMPORT_FUNC(MmSizeOfMdl),
	IMPORT_FUNC(MmMapLockedPages),
	IMPORT_FUNC(MmMapLockedPagesSpecifyCache),
	IMPORT_FUNC(MmUnmapLockedPages),
	IMPORT_FUNC(MmBuildMdlForNonPagedPool),
	IMPORT_FUNC(KeInitializeSpinLock),
	IMPORT_FUNC(IoIsWdmVersionAvailable),
	IMPORT_FUNC(IoGetDeviceProperty),
	IMPORT_FUNC(KeInitializeMutex),
	IMPORT_FUNC(KeReleaseMutex),
	IMPORT_FUNC(KeReadStateMutex),
	IMPORT_FUNC(KeInitializeEvent),
	IMPORT_FUNC(KeSetEvent),
	IMPORT_FUNC(KeResetEvent),
	IMPORT_FUNC(KeClearEvent),
	IMPORT_FUNC(KeReadStateEvent),
	IMPORT_FUNC(KeInitializeTimer),
	IMPORT_FUNC(KeInitializeTimerEx),
	IMPORT_FUNC(KeSetTimer),
	IMPORT_FUNC(KeSetTimerEx),
	IMPORT_FUNC(KeCancelTimer),
	IMPORT_FUNC(KeReadStateTimer),
	IMPORT_FUNC(KeInitializeDpc),
	IMPORT_FUNC(KeInsertQueueDpc),
	IMPORT_FUNC(KeRemoveQueueDpc),
	IMPORT_FUNC(ObReferenceObjectByHandle),
	IMPORT_FUNC(ObfDereferenceObject),
	IMPORT_FUNC(ZwClose),
	IMPORT_FUNC(PsCreateSystemThread),
	IMPORT_FUNC(PsTerminateSystemThread),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match.
	 */
	{ NULL, (FUNC)dummy, NULL },

	{ NULL, NULL, NULL }
};