/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * posix_aio.c implements the POSIX asynchronous I/O functions:
 * aio_read(), aio_write(), lio_listio(), aio_suspend(), aio_error(),
 * aio_return(), aio_fsync(), aio_cancel(), aio_waitn(), and their
 * 64-bit largefile variants.
 */
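/*
 * Illustrative usage sketch (not part of this library): a minimal caller
 * of the interfaces implemented below.  The descriptor "fd", the buffer
 * "buf" and the helper name "read_async" are hypothetical.  It submits a
 * single asynchronous read, polls its status with aio_error(), and reaps
 * the result with aio_return().
 *
 *	#include <aio.h>
 *	#include <errno.h>
 *
 *	static ssize_t
 *	read_async(int fd, void *buf, size_t len, off_t off)
 *	{
 *		aiocb_t cb = { 0 };
 *
 *		cb.aio_fildes = fd;
 *		cb.aio_buf = buf;
 *		cb.aio_nbytes = len;
 *		cb.aio_offset = off;
 *		if (aio_read(&cb) != 0)
 *			return (-1);		// submission failed
 *		while (aio_error(&cb) == EINPROGRESS)
 *			;			// busy-poll for brevity
 *		return (aio_return(&cb));	// bytes read, or -1
 *	}
 */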
#include "lint.h"
#include "thr_uberdata.h"
#include "asyncio.h"
#include <atomic.h>
#include <sys/file.h>
#include <sys/port.h>

extern int __fdsync(int, int);
cond_t _aio_waitn_cv = DEFAULTCV;	/* wait for end of aio_waitn */

static int _aio_check_timeout(const timespec_t *, timespec_t *, int *);
/* defines for timedwait in __aio_waitn() and __aio_suspend() */
#define	AIO_TIMEOUT_INDEF	-1
#define	AIO_TIMEOUT_POLL	0
#define	AIO_TIMEOUT_WAIT	1
#define	AIO_TIMEOUT_UNDEF	2
static void _lio_list_decr(aio_lio_t *);
static long aio_list_max = 0;
int
aio_read(aiocb_t *aiocbp)
{
	if (aiocbp == NULL || aiocbp->aio_reqprio != 0) {
		errno = EINVAL;
		return (-1);
	}
	if (_aio_hash_find(&aiocbp->aio_resultp) != NULL) {
		errno = EBUSY;
		return (-1);
	}
	if (_aio_sigev_thread(aiocbp) != 0)
		return (-1);
	aiocbp->aio_lio_opcode = LIO_READ;
	return (_aio_rw(aiocbp, NULL, &__nextworker_rw, AIOAREAD,
	    (AIO_KAIO | AIO_NO_DUPS)));
}
int
aio_write(aiocb_t *aiocbp)
{
	if (aiocbp == NULL || aiocbp->aio_reqprio != 0) {
		errno = EINVAL;
		return (-1);
	}
	if (_aio_hash_find(&aiocbp->aio_resultp) != NULL) {
		errno = EBUSY;
		return (-1);
	}
	if (_aio_sigev_thread(aiocbp) != 0)
		return (-1);
	aiocbp->aio_lio_opcode = LIO_WRITE;
	return (_aio_rw(aiocbp, NULL, &__nextworker_rw, AIOAWRITE,
	    (AIO_KAIO | AIO_NO_DUPS)));
}
/*
 * __lio_listio() cancellation handler.
 */
/* ARGSUSED */
static void
_lio_listio_cleanup(aio_lio_t *head)
{
	int freeit = 0;

	ASSERT(MUTEX_HELD(&head->lio_mutex));
	if (head->lio_refcnt == 0) {
		ASSERT(head->lio_nent == 0);
		freeit = 1;
	}
	head->lio_waiting = 0;
	sig_mutex_unlock(&head->lio_mutex);
	if (freeit)
		_aio_lio_free(head);
}
int
lio_listio(int mode, aiocb_t *_RESTRICT_KYWD const *_RESTRICT_KYWD list,
	int nent, struct sigevent *_RESTRICT_KYWD sigevp)
{
	int		aio_ufs = 0;
	int		oerrno = 0;
	aio_lio_t	*head = NULL;
	aiocb_t		*aiocbp;
	int		state = 0;
	int		EIOflg = 0;
	int		rw;
	int		do_kaio = 0;
	int		error;
	int		i;

	if (!_kaio_ok)
		_kaio_init();

	if (aio_list_max == 0)
		aio_list_max = sysconf(_SC_AIO_LISTIO_MAX);

	if (nent <= 0 || nent > aio_list_max) {
		errno = EINVAL;
		return (-1);
	}

	switch (mode) {
	case LIO_WAIT:
		state = NOCHECK;
		break;
	case LIO_NOWAIT:
		state = CHECK;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	for (i = 0; i < nent; i++) {
		if ((aiocbp = list[i]) == NULL)
			continue;
		if (_aio_hash_find(&aiocbp->aio_resultp) != NULL) {
			errno = EBUSY;
			return (-1);
		}
		if (_aio_sigev_thread(aiocbp) != 0)
			return (-1);
		if (aiocbp->aio_lio_opcode == LIO_NOP)
			aiocbp->aio_state = NOCHECK;
		else {
			aiocbp->aio_state = state;
			if (KAIO_SUPPORTED(aiocbp->aio_fildes))
				do_kaio++;
			else
				aiocbp->aio_resultp.aio_errno = ENOTSUP;
		}
	}
	if (_aio_sigev_thread_init(sigevp) != 0)
		return (-1);

	if (do_kaio) {
		error = (int)_kaio(AIOLIO, mode, list, nent, sigevp);
		if (error == 0)
			return (0);
		oerrno = errno;
	} else {
		oerrno = errno = ENOTSUP;
		error = -1;
	}

	if (error == -1 && errno == ENOTSUP) {
		error = errno = 0;
		/*
		 * If LIO_WAIT, or notification required, allocate a list head.
		 */
		if (mode == LIO_WAIT ||
		    (sigevp != NULL &&
		    (sigevp->sigev_notify == SIGEV_SIGNAL ||
		    sigevp->sigev_notify == SIGEV_THREAD ||
		    sigevp->sigev_notify == SIGEV_PORT)))
			head = _aio_lio_alloc();
		if (head) {
			sig_mutex_lock(&head->lio_mutex);
			head->lio_mode = mode;
			head->lio_largefile = 0;
			if (mode == LIO_NOWAIT && sigevp != NULL) {
				if (sigevp->sigev_notify == SIGEV_THREAD) {
					head->lio_port = sigevp->sigev_signo;
					head->lio_event = AIOLIO;
					head->lio_sigevent = sigevp;
					head->lio_sigval.sival_ptr =
					    sigevp->sigev_value.sival_ptr;
				} else if (sigevp->sigev_notify == SIGEV_PORT) {
					port_notify_t *pn =
					    sigevp->sigev_value.sival_ptr;
					head->lio_port = pn->portnfy_port;
					head->lio_event = AIOLIO;
					head->lio_sigevent = sigevp;
					head->lio_sigval.sival_ptr =
					    pn->portnfy_user;
				} else {	/* SIGEV_SIGNAL */
					head->lio_signo = sigevp->sigev_signo;
					head->lio_sigval.sival_ptr =
					    sigevp->sigev_value.sival_ptr;
				}
			}
			head->lio_nent = head->lio_refcnt = nent;
			sig_mutex_unlock(&head->lio_mutex);
		}
		/*
		 * find UFS requests, errno == ENOTSUP/EBADFD,
		 */
		for (i = 0; i < nent; i++) {
			if ((aiocbp = list[i]) == NULL ||
			    aiocbp->aio_lio_opcode == LIO_NOP ||
			    (aiocbp->aio_resultp.aio_errno != ENOTSUP &&
			    aiocbp->aio_resultp.aio_errno != EBADFD)) {
				if (head)
					_lio_list_decr(head);
				continue;
			}
			if (aiocbp->aio_resultp.aio_errno == EBADFD)
				SET_KAIO_NOT_SUPPORTED(aiocbp->aio_fildes);
			if (aiocbp->aio_reqprio != 0) {
				aiocbp->aio_resultp.aio_errno = EINVAL;
				aiocbp->aio_resultp.aio_return = -1;
				EIOflg = 1;
				if (head)
					_lio_list_decr(head);
				continue;
			}
			/*
			 * submit an AIO request with flags AIO_NO_KAIO
			 * to avoid the kaio() syscall in _aio_rw()
			 */
			switch (aiocbp->aio_lio_opcode) {
			case LIO_READ:
				rw = AIOAREAD;
				break;
			case LIO_WRITE:
				rw = AIOAWRITE;
				break;
			}
			error = _aio_rw(aiocbp, head, &__nextworker_rw, rw,
			    (AIO_NO_KAIO | AIO_NO_DUPS));
			if (error == 0)
				aio_ufs++;
			else {
				if (head)
					_lio_list_decr(head);
				aiocbp->aio_resultp.aio_errno = error;
				EIOflg = 1;
			}
		}
	}
	if (EIOflg) {
		errno = EIO;
		return (-1);
	}
	if (mode == LIO_WAIT && oerrno == ENOTSUP) {
		/*
		 * call kaio(AIOLIOWAIT) to get all outstanding
		 * kernel AIO requests
		 */
		if ((nent - aio_ufs) > 0)
			(void) _kaio(AIOLIOWAIT, mode, list, nent, sigevp);
		if (head != NULL && head->lio_nent > 0) {
			sig_mutex_lock(&head->lio_mutex);
			while (head->lio_refcnt > 0) {
				int err;
				head->lio_waiting = 1;
				pthread_cleanup_push(_lio_listio_cleanup, head);
				err = sig_cond_wait(&head->lio_cond_cv,
				    &head->lio_mutex);
				pthread_cleanup_pop(0);
				head->lio_waiting = 0;
				if (err && head->lio_nent > 0) {
					sig_mutex_unlock(&head->lio_mutex);
					errno = err;
					return (-1);
				}
			}
			sig_mutex_unlock(&head->lio_mutex);
			ASSERT(head->lio_nent == 0 && head->lio_refcnt == 0);
			_aio_lio_free(head);
			for (i = 0; i < nent; i++) {
				if ((aiocbp = list[i]) != NULL &&
				    aiocbp->aio_resultp.aio_errno) {
					errno = EIO;
					return (-1);
				}
			}
		}
		return (0);
	}
	return (error);
}
static void
_lio_list_decr(aio_lio_t *head)
{
	sig_mutex_lock(&head->lio_mutex);
	head->lio_nent--;
	head->lio_refcnt--;
	sig_mutex_unlock(&head->lio_mutex);
}
/*
 * __aio_suspend() cancellation handler.
 */
/* ARGSUSED */
static void
_aio_suspend_cleanup(int *counter)
{
	ASSERT(MUTEX_HELD(&__aio_mutex));
	(*counter)--;		/* _aio_kernel_suspend or _aio_suscv_cnt */
	sig_mutex_unlock(&__aio_mutex);
}
static int
__aio_suspend(void **list, int nent, const timespec_t *timo, int largefile)
{
	int		cv_err;	/* error code from cond_xxx() */
	int		kerr;	/* error code from _kaio(AIOSUSPEND) */
	int		i;
	timespec_t	twait;	/* copy of timo for internal calculations */
	timespec_t	*wait = NULL;
	int		timedwait;
	int		req_outstanding;
	aiocb_t		**listp;
	aiocb_t		*aiocbp;
#if !defined(_LP64)
	aiocb64_t	**listp64;
	aiocb64_t	*aiocbp64;
#endif
	hrtime_t	hrtstart;
	hrtime_t	hrtend;
	hrtime_t	hrtres;

#if defined(_LP64)
	if (largefile)
		aio_panic("__aio_suspend: largefile set when _LP64 defined");
#endif

	if (nent <= 0) {
		errno = EINVAL;
		return (-1);
	}

	if (timo) {
		if (timo->tv_sec < 0 || timo->tv_nsec < 0 ||
		    timo->tv_nsec >= NANOSEC) {
			errno = EINVAL;
			return (-1);
		}
		/* Initialize start time if time monitoring desired */
		if (timo->tv_sec > 0 || timo->tv_nsec > 0) {
			timedwait = AIO_TIMEOUT_WAIT;
			hrtstart = gethrtime();
		} else {
			/* content of timeout = 0 : polling */
			timedwait = AIO_TIMEOUT_POLL;
		}
	} else {
		/* timeout pointer = NULL : wait indefinitely */
		timedwait = AIO_TIMEOUT_INDEF;
	}

#if !defined(_LP64)
	if (largefile) {
		listp64 = (aiocb64_t **)list;
		for (i = 0; i < nent; i++) {
			if ((aiocbp64 = listp64[i]) != NULL &&
			    aiocbp64->aio_state == CHECK)
				aiocbp64->aio_state = CHECKED;
		}
	} else
#endif	/* !_LP64 */
	{
		listp = (aiocb_t **)list;
		for (i = 0; i < nent; i++) {
			if ((aiocbp = listp[i]) != NULL &&
			    aiocbp->aio_state == CHECK)
				aiocbp->aio_state = CHECKED;
		}
	}

	sig_mutex_lock(&__aio_mutex);

	/*
	 * The next "if" case is required to accelerate the
	 * access to completed RAW-IO requests.
	 */
	if ((_aio_doneq_cnt + _aio_outstand_cnt) == 0) {
		/* Only kernel requests pending */

		/*
		 * _aio_kernel_suspend is used to detect completed non RAW-IO
		 * requests.
		 * As long as this thread resides in the kernel (_kaio) further
		 * asynchronous non RAW-IO requests could be submitted.
		 */
		_aio_kernel_suspend++;

		/*
		 * Always do the kaio() call without using the KAIO_SUPPORTED()
		 * checks because it is not mandatory to have a valid fd
		 * set in the list entries, only the resultp must be set.
		 *
		 * _kaio(AIOSUSPEND ...) return values :
		 * 0:  everything ok, completed request found
		 * -1: error
		 * 1:  no error : _aiodone woke up the _kaio(AIOSUSPEND,,)
		 *	system call using _kaio(AIONOTIFY). It means that some
		 *	non RAW-IOs completed in between.
		 */

		pthread_cleanup_push(_aio_suspend_cleanup,
		    &_aio_kernel_suspend);
		pthread_cleanup_push(sig_mutex_lock, &__aio_mutex);
		sig_mutex_unlock(&__aio_mutex);
		_cancel_prologue();
		kerr = (int)_kaio(largefile? AIOSUSPEND64 : AIOSUSPEND,
		    list, nent, timo, -1);
		_cancel_epilogue();
		pthread_cleanup_pop(1);	/* sig_mutex_lock(&__aio_mutex) */
		pthread_cleanup_pop(0);

		_aio_kernel_suspend--;

		if (!kerr) {
			sig_mutex_unlock(&__aio_mutex);
			return (0);
		}
	} else {
		kerr = 1;	/* simulation: _kaio detected AIONOTIFY */
	}

	/*
	 * Return kernel error code if no other IOs are outstanding.
	 */
	req_outstanding = _aio_doneq_cnt + _aio_outstand_cnt;

	sig_mutex_unlock(&__aio_mutex);

	if (req_outstanding == 0) {
		/* no IOs outstanding in the thread pool */
		if (kerr == 1)
			/* return "no IOs completed" */
			errno = EAGAIN;
		return (-1);
	}

	/*
	 * IOs using the thread pool are outstanding.
	 */
	if (timedwait == AIO_TIMEOUT_WAIT) {
		/* time monitoring */
		hrtend = hrtstart + (hrtime_t)timo->tv_sec * (hrtime_t)NANOSEC +
		    (hrtime_t)timo->tv_nsec;
		hrtres = hrtend - gethrtime();
		if (hrtres <= 0)
			hrtres = 1;
		twait.tv_sec = hrtres / (hrtime_t)NANOSEC;
		twait.tv_nsec = hrtres % (hrtime_t)NANOSEC;
		wait = &twait;
	} else if (timedwait == AIO_TIMEOUT_POLL) {
		twait = *timo;	/* content of timo = 0 : polling */
		wait = &twait;
	}

	for (;;) {
		int	error;
		int	inprogress;

		/* first scan file system requests */
		inprogress = 0;
		for (i = 0; i < nent; i++) {
#if !defined(_LP64)
			if (largefile) {
				if ((aiocbp64 = listp64[i]) == NULL)
					continue;
				error = aiocbp64->aio_resultp.aio_errno;
			} else
#endif
			{
				if ((aiocbp = listp[i]) == NULL)
					continue;
				error = aiocbp->aio_resultp.aio_errno;
			}
			if (error == EINPROGRESS)
				inprogress = 1;
			else if (error != ECANCELED) {
				errno = 0;
				return (0);
			}
		}

		sig_mutex_lock(&__aio_mutex);

		/*
		 * If there aren't outstanding I/Os in the thread pool then
		 * we have to return here, provided that all kernel RAW-IOs
		 * were completed.
		 * If the kernel was notified to return, then we have to check
		 * possible pending RAW-IOs.
		 */
		if (_aio_outstand_cnt == 0 && inprogress == 0 && kerr != 1) {
			sig_mutex_unlock(&__aio_mutex);
			errno = EAGAIN;
			break;
		}

		/*
		 * There are outstanding IOs in the thread pool or the kernel
		 * was notified to return.
		 * Check pending RAW-IOs first.
		 */
		if (kerr == 1) {
			/*
			 * _aiodone just notified the kernel about
			 * completed non RAW-IOs (AIONOTIFY was detected).
			 */
			if (timedwait == AIO_TIMEOUT_WAIT) {
				/* Update remaining timeout for the kernel */
				hrtres = hrtend - gethrtime();
				if (hrtres <= 0) {
					/* timer expired */
					sig_mutex_unlock(&__aio_mutex);
					errno = EAGAIN;
					break;
				}
				wait->tv_sec = hrtres / (hrtime_t)NANOSEC;
				wait->tv_nsec = hrtres % (hrtime_t)NANOSEC;
			}
			_aio_kernel_suspend++;

			pthread_cleanup_push(_aio_suspend_cleanup,
			    &_aio_kernel_suspend);
			pthread_cleanup_push(sig_mutex_lock, &__aio_mutex);
			sig_mutex_unlock(&__aio_mutex);
			_cancel_prologue();
			kerr = (int)_kaio(largefile? AIOSUSPEND64 : AIOSUSPEND,
			    list, nent, wait, -1);
			_cancel_epilogue();
			pthread_cleanup_pop(1);
			pthread_cleanup_pop(0);

			_aio_kernel_suspend--;

			if (!kerr) {
				sig_mutex_unlock(&__aio_mutex);
				return (0);
			}
		}

		if (timedwait == AIO_TIMEOUT_POLL) {
			sig_mutex_unlock(&__aio_mutex);
			errno = EAGAIN;
			break;
		}

		if (timedwait == AIO_TIMEOUT_WAIT) {
			/* Update remaining timeout */
			hrtres = hrtend - gethrtime();
			if (hrtres <= 0) {
				/* timer expired */
				sig_mutex_unlock(&__aio_mutex);
				errno = EAGAIN;
				break;
			}
			wait->tv_sec = hrtres / (hrtime_t)NANOSEC;
			wait->tv_nsec = hrtres % (hrtime_t)NANOSEC;
		}

		if (_aio_outstand_cnt == 0) {
			sig_mutex_unlock(&__aio_mutex);
			continue;
		}

		_aio_suscv_cnt++;	/* ID for _aiodone (wake up) */

		pthread_cleanup_push(_aio_suspend_cleanup, &_aio_suscv_cnt);
		if (timedwait == AIO_TIMEOUT_WAIT) {
			cv_err = sig_cond_reltimedwait(&_aio_iowait_cv,
			    &__aio_mutex, wait);
			if (cv_err == ETIME)
				cv_err = EAGAIN;
		} else {
			/* wait indefinitely */
			cv_err = sig_cond_wait(&_aio_iowait_cv, &__aio_mutex);
		}
		/* this decrements _aio_suscv_cnt and drops __aio_mutex */
		pthread_cleanup_pop(1);

		if (cv_err) {
			errno = cv_err;
			break;
		}
	}
	return (-1);
}
int
aio_suspend(const aiocb_t * const list[], int nent,
	const timespec_t *timeout)
{
	return (__aio_suspend((void **)list, nent, timeout, 0));
}
int
aio_error(const aiocb_t *aiocbp)
{
	const aio_result_t *resultp = &aiocbp->aio_resultp;
	aio_req_t *reqp;
	int error;

	if ((error = resultp->aio_errno) == EINPROGRESS) {
		if (aiocbp->aio_state == CHECK) {
			/*
			 * Always do the kaio() call without using the
			 * KAIO_SUPPORTED() checks because it is not
			 * mandatory to have a valid fd set in the
			 * aiocb, only the resultp must be set.
			 */
			if ((int)_kaio(AIOERROR, aiocbp) == EINVAL) {
				errno = EINVAL;
				return (-1);
			}
			error = resultp->aio_errno;
		} else if (aiocbp->aio_state == CHECKED) {
			((aiocb_t *)aiocbp)->aio_state = CHECK;
		}
	} else if (aiocbp->aio_state == USERAIO) {
		sig_mutex_lock(&__aio_mutex);
		if ((reqp = _aio_hash_del((aio_result_t *)resultp)) == NULL) {
			sig_mutex_unlock(&__aio_mutex);
			((aiocb_t *)aiocbp)->aio_state = CHECKED;
		} else {
			((aiocb_t *)aiocbp)->aio_state = NOCHECK;
			ASSERT(reqp->req_head == NULL);
			(void) _aio_req_remove(reqp);
			sig_mutex_unlock(&__aio_mutex);
			_aio_req_free(reqp);
		}
	}
	return (error);
}
ssize_t
aio_return(aiocb_t *aiocbp)
{
	aio_result_t *resultp = &aiocbp->aio_resultp;
	aio_req_t *reqp;
	int error;
	ssize_t retval;

	/*
	 * The _aiodone() function stores resultp->aio_return before
	 * storing resultp->aio_errno (with a membar_producer() in
	 * between).  We use membar_consumer() below to ensure proper
	 * memory ordering between _aiodone() and ourselves.
	 */
	error = resultp->aio_errno;
	membar_consumer();
	retval = resultp->aio_return;

	/*
	 * We use this condition to indicate either that
	 * aio_return() has been called before or should
	 * not have been called yet.
	 */
	if ((retval == -1 && error == EINVAL) || error == EINPROGRESS) {
		errno = error;
		return (-1);
	}

	/*
	 * Before we return, mark the result as being returned so that later
	 * calls to aio_return() will return the fact that the result has
	 * already been returned.
	 */
	sig_mutex_lock(&__aio_mutex);
	/* retest, in case more than one thread actually got in here */
	if (resultp->aio_return == -1 && resultp->aio_errno == EINVAL) {
		sig_mutex_unlock(&__aio_mutex);
		errno = EINVAL;
		return (-1);
	}
	resultp->aio_return = -1;
	resultp->aio_errno = EINVAL;
	if ((reqp = _aio_hash_del(resultp)) == NULL)
		sig_mutex_unlock(&__aio_mutex);
	else {
		aiocbp->aio_state = NOCHECK;
		ASSERT(reqp->req_head == NULL);
		(void) _aio_req_remove(reqp);
		sig_mutex_unlock(&__aio_mutex);
		_aio_req_free(reqp);
	}

	if (retval == -1)
		errno = error;
	return (retval);
}
void
_lio_remove(aio_req_t *reqp)
{
	aio_lio_t *head;
	int refcnt;

	if ((head = reqp->req_head) != NULL) {
		sig_mutex_lock(&head->lio_mutex);
		ASSERT(head->lio_refcnt == head->lio_nent);
		refcnt = --head->lio_nent;
		head->lio_refcnt--;
		sig_mutex_unlock(&head->lio_mutex);
		if (refcnt == 0)
			_aio_lio_free(head);
		reqp->req_head = NULL;
	}
}
/*
 * This function returns the number of asynchronous I/O requests submitted.
 */
static int
__aio_fsync_bar(aiocb_t *aiocbp, aio_lio_t *head, aio_worker_t *aiowp,
    int workerscnt)
{
	int i;
	int error;
	aio_worker_t *next = aiowp;

	for (i = 0; i < workerscnt; i++) {
		error = _aio_rw(aiocbp, head, &next, AIOFSYNC, AIO_NO_KAIO);
		if (error != 0) {
			sig_mutex_lock(&head->lio_mutex);
			head->lio_mode = LIO_DESTROY;	/* ignore fsync */
			head->lio_nent -= workerscnt - i;
			head->lio_refcnt -= workerscnt - i;
			sig_mutex_unlock(&head->lio_mutex);
			errno = EAGAIN;
			return (i);
		}
		next = next->work_forw;
	}

	return (i);
}
int
aio_fsync(int op, aiocb_t *aiocbp)
{
	aio_lio_t *head;
	struct stat statb;
	int fret;

	if (aiocbp == NULL)
		return (0);
	if (op != O_DSYNC && op != O_SYNC) {
		errno = EINVAL;
		return (-1);
	}
	if (_aio_hash_find(&aiocbp->aio_resultp) != NULL) {
		errno = EBUSY;
		return (-1);
	}
	if (fstat(aiocbp->aio_fildes, &statb) < 0)
		return (-1);
	if (_aio_sigev_thread(aiocbp) != 0)
		return (-1);

	/*
	 * Kernel aio_fsync() is not supported.
	 * We force user-level aio_fsync() just
	 * for the notification side-effect.
	 */
	if (!__uaio_ok && __uaio_init() == -1)
		return (-1);

	/*
	 * The first asynchronous I/O request in the current process will
	 * create a bunch of workers (via __uaio_init()).  If the number
	 * of workers is zero then the number of pending asynchronous I/O
	 * requests is zero.  In such a case only execute the standard
	 * fsync(3C) or fdatasync(3RT) as appropriate.
	 */
	if (__rw_workerscnt == 0) {
		if (op == O_DSYNC)
			return (__fdsync(aiocbp->aio_fildes, FDSYNC));
		else
			return (__fdsync(aiocbp->aio_fildes, FSYNC));
	}

	/*
	 * re-use aio_offset as the op field.
	 *	O_DSYNC - fdatasync()
	 *	O_SYNC - fsync()
	 */
	aiocbp->aio_offset = op;
	aiocbp->aio_lio_opcode = AIOFSYNC;

	/*
	 * Create a list of fsync requests.  The worker that
	 * gets the last request will do the fsync request.
	 */
	head = _aio_lio_alloc();
	if (head == NULL) {
		errno = EAGAIN;
		return (-1);
	}
	head->lio_mode = LIO_FSYNC;
	head->lio_nent = head->lio_refcnt = __rw_workerscnt;
	head->lio_largefile = 0;

	/*
	 * Insert an fsync request on every worker's queue.
	 */
	fret = __aio_fsync_bar(aiocbp, head, __workers_rw, __rw_workerscnt);
	if (fret != __rw_workerscnt) {
		/*
		 * Fewer fsync requests than workers means that it was
		 * not possible to submit fsync requests to all workers.
		 * Actions:
		 * a) number of fsync requests submitted is 0:
		 *    => free allocated memory (aio_lio_t).
		 * b) number of fsync requests submitted is > 0:
		 *    => the last worker executing the fsync request
		 *	 will free the aio_lio_t struct.
		 */
		if (fret == 0)
			_aio_lio_free(head);
		return (-1);
	}
	return (0);
}
int
aio_cancel(int fd, aiocb_t *aiocbp)
{
	aio_req_t *reqp;
	aio_worker_t *aiowp;
	int done = 0;
	int canceled = 0;
	struct stat buf;

	if (fstat(fd, &buf) < 0)
		return (-1);

	if (aiocbp != NULL) {
		if (fd != aiocbp->aio_fildes) {
			errno = EINVAL;
			return (-1);
		}
		if (aiocbp->aio_state == USERAIO) {
			sig_mutex_lock(&__aio_mutex);
			reqp = _aio_hash_find(&aiocbp->aio_resultp);
			if (reqp == NULL) {
				sig_mutex_unlock(&__aio_mutex);
				return (AIO_ALLDONE);
			}
			aiowp = reqp->req_worker;
			sig_mutex_lock(&aiowp->work_qlock1);
			(void) _aio_cancel_req(aiowp, reqp, &canceled, &done);
			sig_mutex_unlock(&aiowp->work_qlock1);
			sig_mutex_unlock(&__aio_mutex);
			if (done)
				return (AIO_ALLDONE);
			if (canceled)
				return (AIO_CANCELED);
			return (AIO_NOTCANCELED);
		}
		if (aiocbp->aio_state == USERAIO_DONE)
			return (AIO_ALLDONE);
		return ((int)_kaio(AIOCANCEL, fd, aiocbp));
	}

	return (aiocancel_all(fd));
}
/*
 * __aio_waitn() cancellation handler.
 */
/* ARGSUSED */
static void
_aio_waitn_cleanup(void *arg)
{
	ASSERT(MUTEX_HELD(&__aio_mutex));

	/* check for pending aio_waitn() calls */
	_aio_flags &= ~(AIO_LIB_WAITN | AIO_WAIT_INPROGRESS | AIO_IO_WAITING);
	if (_aio_flags & AIO_LIB_WAITN_PENDING) {
		_aio_flags &= ~AIO_LIB_WAITN_PENDING;
		(void) cond_signal(&_aio_waitn_cv);
	}

	sig_mutex_unlock(&__aio_mutex);
}
/*
 * aio_waitn can be used to reap the results of several I/O operations that
 * were submitted asynchronously.  The submission of I/Os can be done using
 * existing POSIX interfaces: lio_listio, aio_write or aio_read.
 * aio_waitn waits until "nwait" I/Os (supplied as a parameter) have
 * completed and it returns the descriptors for these I/Os in "list".  The
 * maximum size of this list is given by "nent" and the actual number of
 * I/Os completed is returned in "nwait".  aio_waitn may also return early
 * if the timeout expires.  It returns 0 on success and -1 if an error
 * occurred.
 */
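/*
 * Illustrative usage sketch (not part of this library): submitting a batch
 * of requests with lio_listio(LIO_NOWAIT) and reaping at least half of them
 * with aio_waitn().  The array "cbs" of prepared aiocbs and the count NREQS
 * are hypothetical.
 *
 *	aiocb_t *done[NREQS];
 *	uint_t nwait = NREQS / 2;	// block until at least this many finish
 *	uint_t i;
 *
 *	if (lio_listio(LIO_NOWAIT, cbs, NREQS, NULL) != 0)
 *		return (-1);
 *	if (aio_waitn(done, NREQS, &nwait, NULL) != 0)
 *		return (-1);
 *	// on return, nwait holds the number of completed requests
 *	for (i = 0; i < nwait; i++)
 *		(void) aio_return(done[i]);
 */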
static int
__aio_waitn(void **list, uint_t nent, uint_t *nwait, const timespec_t *utimo)
{
	int error = 0;
	uint_t dnwait = 0;	/* amount of requests in the waitn-done list */
	uint_t kwaitcnt;	/* expected "done" requests from kernel */
	uint_t knentcnt;	/* max. expected "done" requests from kernel */
	int uerrno = 0;
	int kerrno = 0;		/* save errno from _kaio() call */
	int timedwait = AIO_TIMEOUT_UNDEF;
	aio_req_t *reqp;
	timespec_t end;
	timespec_t twait;	/* copy of utimo for internal calculations */
	timespec_t *wait = NULL;

	if (nent == 0 || *nwait == 0 || *nwait > nent) {
		errno = EINVAL;
		return (-1);
	}

	/*
	 * Only one running aio_waitn call per process allowed.
	 * Further calls will be blocked here until the running
	 * call finishes.
	 */

	sig_mutex_lock(&__aio_mutex);

	while (_aio_flags & AIO_LIB_WAITN) {
		if (utimo && utimo->tv_sec == 0 && utimo->tv_nsec == 0) {
			sig_mutex_unlock(&__aio_mutex);
			*nwait = 0;
			return (0);
		}
		_aio_flags |= AIO_LIB_WAITN_PENDING;
		pthread_cleanup_push(sig_mutex_unlock, &__aio_mutex);
		error = sig_cond_wait(&_aio_waitn_cv, &__aio_mutex);
		pthread_cleanup_pop(0);
		if (error != 0) {
			sig_mutex_unlock(&__aio_mutex);
			*nwait = 0;
			errno = error;
			return (-1);
		}
	}

	pthread_cleanup_push(_aio_waitn_cleanup, NULL);

	_aio_flags |= AIO_LIB_WAITN;

	if (_aio_check_timeout(utimo, &end, &timedwait) != 0) {
		error = -1;
		dnwait = 0;
		goto out;
	}
	if (timedwait != AIO_TIMEOUT_INDEF) {
		twait = *utimo;
		wait = &twait;
	}

	/*
	 * If both counters are still set to zero, then only
	 * kernel requests are currently outstanding (raw-I/Os).
	 */
	if ((_aio_doneq_cnt + _aio_outstand_cnt) == 0) {
		for (;;) {
			kwaitcnt = *nwait - dnwait;
			knentcnt = nent - dnwait;
			if (knentcnt > AIO_WAITN_MAXIOCBS)
				knentcnt = AIO_WAITN_MAXIOCBS;
			kwaitcnt = (kwaitcnt > knentcnt) ? knentcnt : kwaitcnt;

			pthread_cleanup_push(sig_mutex_lock, &__aio_mutex);
			sig_mutex_unlock(&__aio_mutex);
			_cancel_prologue();
			error = (int)_kaio(AIOWAITN, &list[dnwait], knentcnt,
			    &kwaitcnt, wait);
			_cancel_epilogue();
			pthread_cleanup_pop(1);

			if (error == 0) {
				dnwait += kwaitcnt;
				if (dnwait >= *nwait ||
				    *nwait < AIO_WAITN_MAXIOCBS)
					break;
				if (timedwait == AIO_TIMEOUT_WAIT) {
					error = _aio_get_timedelta(&end, wait);
					if (error == -1) {
						/* timer expired */
						errno = ETIME;
						break;
					}
				}
				continue;
			}
			if (errno == EAGAIN) {
				if (dnwait > 0)
					error = 0;
				break;
			}
			if (errno == ETIME || errno == EINTR) {
				dnwait += kwaitcnt;
				break;
			}
			/* fatal error */
			break;
		}

		goto out;
	}

	/* File system I/Os outstanding ... */

	if (timedwait == AIO_TIMEOUT_UNDEF) {
		if (_aio_check_timeout(utimo, &end, &timedwait) != 0) {
			error = -1;
			dnwait = 0;
			goto out;
		}
		if (timedwait != AIO_TIMEOUT_INDEF) {
			twait = *utimo;
			wait = &twait;
		}
	}

	for (;;) {
		uint_t	sum_reqs;

		/*
		 * Calculate sum of active non RAW-IO requests (sum_reqs).
		 * If the expected amount of completed requests (*nwait) is
		 * greater than the calculated sum (sum_reqs) then
		 * use _kaio to check pending RAW-IO requests.
		 */
		sum_reqs = _aio_doneq_cnt + dnwait + _aio_outstand_cnt;
		kwaitcnt = (*nwait > sum_reqs) ? *nwait - sum_reqs : 0;

		if (kwaitcnt != 0) {
			/* possibly some kernel I/Os outstanding */
			knentcnt = nent - dnwait;
			if (knentcnt > AIO_WAITN_MAXIOCBS)
				knentcnt = AIO_WAITN_MAXIOCBS;
			kwaitcnt = (kwaitcnt > knentcnt) ? knentcnt : kwaitcnt;

			_aio_flags |= AIO_WAIT_INPROGRESS;

			pthread_cleanup_push(sig_mutex_lock, &__aio_mutex);
			sig_mutex_unlock(&__aio_mutex);
			_cancel_prologue();
			error = (int)_kaio(AIOWAITN, &list[dnwait], knentcnt,
			    &kwaitcnt, wait);
			_cancel_epilogue();
			pthread_cleanup_pop(1);

			_aio_flags &= ~AIO_WAIT_INPROGRESS;

			if (error == 0) {
				dnwait += kwaitcnt;
			} else {
				switch (errno) {
				case EINVAL:
				case EAGAIN:
					/* don't wait for kernel I/Os */
					kerrno = 0; /* ignore _kaio() errno */
					*nwait = _aio_doneq_cnt +
					    _aio_outstand_cnt + dnwait;
					error = 0;
					break;
				case EINTR:
				case ETIME:
					/* just scan for completed LIB I/Os */
					dnwait += kwaitcnt;
					timedwait = AIO_TIMEOUT_POLL;
					kerrno = errno;	/* save _kaio() errno */
					error = 0;
					break;
				default:
					kerrno = errno;	/* save _kaio() errno */
					break;
				}
			}
			if (error)
				break;		/* fatal kernel error */
		}

		/* check completed FS requests in the "done" queue */

		while (_aio_doneq_cnt && dnwait < nent) {
			/* get done requests */
			if ((reqp = _aio_req_remove(NULL)) != NULL) {
				(void) _aio_hash_del(reqp->req_resultp);
				list[dnwait++] = reqp->req_aiocbp;
				_aio_req_mark_done(reqp);
				_lio_remove(reqp);
				_aio_req_free(reqp);
			}
		}

		if (dnwait >= *nwait) {
			/* min. requested amount of completed I/Os satisfied */
			break;
		}
		if (timedwait == AIO_TIMEOUT_WAIT &&
		    (error = _aio_get_timedelta(&end, wait)) == -1) {
			/* timer expired */
			uerrno = ETIME;
			break;
		}

		/*
		 * If some I/Os are outstanding and we have to wait for them,
		 * then sleep here.  _aiodone() will call _aio_waitn_wakeup()
		 * to wake up this thread as soon as the required amount of
		 * completed I/Os is done.
		 */
		if (_aio_outstand_cnt > 0 && timedwait != AIO_TIMEOUT_POLL) {
			/*
			 * _aio_waitn_wakeup() will wake up this thread when:
			 * - _aio_waitncnt requests are completed or
			 * - _aio_outstand_cnt becomes zero.
			 * sig_cond_reltimedwait() could also return with
			 * a timeout error (ETIME).
			 */
			if (*nwait < _aio_outstand_cnt)
				_aio_waitncnt = *nwait;
			else
				_aio_waitncnt = _aio_outstand_cnt;

			_aio_flags |= AIO_IO_WAITING;

			if (wait)
				uerrno = sig_cond_reltimedwait(&_aio_iowait_cv,
				    &__aio_mutex, wait);
			else
				uerrno = sig_cond_wait(&_aio_iowait_cv,
				    &__aio_mutex);

			_aio_flags &= ~AIO_IO_WAITING;

			if (uerrno == ETIME) {
				timedwait = AIO_TIMEOUT_POLL;
				continue;
			}
			if (uerrno != 0)
				timedwait = AIO_TIMEOUT_POLL;
		}

		if (timedwait == AIO_TIMEOUT_POLL) {
			/* polling or timer expired */
			break;
		}
	}

	errno = uerrno == 0 ? kerrno : uerrno;
	if (errno)
		error = -1;
	else
		error = 0;

out:
	*nwait = dnwait;

	pthread_cleanup_pop(1);		/* drops __aio_mutex */

	return (error);
}
int
aio_waitn(aiocb_t *list[], uint_t nent, uint_t *nwait,
	const timespec_t *timeout)
{
	return (__aio_waitn((void **)list, nent, nwait, timeout));
}
void
_aio_waitn_wakeup(void)
{
	/*
	 * __aio_waitn() sets AIO_IO_WAITING to notify _aiodone() that
	 * it is waiting for completed I/Os.  The number of required
	 * completed I/Os is stored into "_aio_waitncnt".
	 * aio_waitn() is woken up when
	 * - there are no further outstanding I/Os
	 *   (_aio_outstand_cnt == 0) or
	 * - the expected number of I/Os has completed.
	 * Only one __aio_waitn() function waits for completed I/Os at
	 * a time.
	 *
	 * __aio_suspend() increments "_aio_suscv_cnt" to notify
	 * _aiodone() that at least one __aio_suspend() call is
	 * waiting for completed I/Os.
	 * There could be more than one __aio_suspend() function
	 * waiting for completed I/Os.  Because every function should
	 * be waiting for different I/Os, _aiodone() has to wake up all
	 * __aio_suspend() functions each time.
	 * Every __aio_suspend() function will compare the recently
	 * completed I/O with its own list.
	 */
	ASSERT(MUTEX_HELD(&__aio_mutex));
	if (_aio_flags & AIO_IO_WAITING) {
		if (_aio_waitncnt > 0)
			_aio_waitncnt--;
		if (_aio_outstand_cnt == 0 || _aio_waitncnt == 0 ||
		    _aio_suscv_cnt > 0)
			(void) cond_broadcast(&_aio_iowait_cv);
	} else {
		/* Wake up waiting aio_suspend calls */
		if (_aio_suscv_cnt > 0)
			(void) cond_broadcast(&_aio_iowait_cv);
	}
}
/*
 * timedwait values :
 * AIO_TIMEOUT_POLL	: polling
 * AIO_TIMEOUT_WAIT	: timeout
 * AIO_TIMEOUT_INDEF	: wait indefinitely
 */
static int
_aio_check_timeout(const timespec_t *utimo, timespec_t *end, int *timedwait)
{
	struct timeval	curtime;

	if (utimo) {
		if (utimo->tv_sec < 0 || utimo->tv_nsec < 0 ||
		    utimo->tv_nsec >= NANOSEC) {
			errno = EINVAL;
			return (-1);
		}
		if (utimo->tv_sec > 0 || utimo->tv_nsec > 0) {
			(void) gettimeofday(&curtime, NULL);
			end->tv_sec = utimo->tv_sec + curtime.tv_sec;
			end->tv_nsec = utimo->tv_nsec + 1000 * curtime.tv_usec;
			if (end->tv_nsec >= NANOSEC) {
				end->tv_nsec -= NANOSEC;
				end->tv_sec += 1;
			}
			*timedwait = AIO_TIMEOUT_WAIT;
		} else {
			/* polling */
			*timedwait = AIO_TIMEOUT_POLL;
		}
	} else {
		*timedwait = AIO_TIMEOUT_INDEF;	/* wait indefinitely */
	}
	return (0);
}
#if !defined(_LP64)

int
aio_read64(aiocb64_t *aiocbp)
{
	if (aiocbp == NULL || aiocbp->aio_reqprio != 0) {
		errno = EINVAL;
		return (-1);
	}
	if (_aio_hash_find(&aiocbp->aio_resultp) != NULL) {
		errno = EBUSY;
		return (-1);
	}
	if (_aio_sigev_thread64(aiocbp) != 0)
		return (-1);
	aiocbp->aio_lio_opcode = LIO_READ;
	return (_aio_rw64(aiocbp, NULL, &__nextworker_rw, AIOAREAD64,
	    (AIO_KAIO | AIO_NO_DUPS)));
}
int
aio_write64(aiocb64_t *aiocbp)
{
	if (aiocbp == NULL || aiocbp->aio_reqprio != 0) {
		errno = EINVAL;
		return (-1);
	}
	if (_aio_hash_find(&aiocbp->aio_resultp) != NULL) {
		errno = EBUSY;
		return (-1);
	}
	if (_aio_sigev_thread64(aiocbp) != 0)
		return (-1);
	aiocbp->aio_lio_opcode = LIO_WRITE;
	return (_aio_rw64(aiocbp, NULL, &__nextworker_rw, AIOAWRITE64,
	    (AIO_KAIO | AIO_NO_DUPS)));
}
int
lio_listio64(int mode, aiocb64_t *_RESTRICT_KYWD const *_RESTRICT_KYWD list,
	int nent, struct sigevent *_RESTRICT_KYWD sigevp)
{
	int		aio_ufs = 0;
	int		oerrno = 0;
	aio_lio_t	*head = NULL;
	aiocb64_t	*aiocbp;
	int		state = 0;
	int		EIOflg = 0;
	int		rw;
	int		do_kaio = 0;
	int		error;
	int		i;

	if (!_kaio_ok)
		_kaio_init();

	if (aio_list_max == 0)
		aio_list_max = sysconf(_SC_AIO_LISTIO_MAX);

	if (nent <= 0 || nent > aio_list_max) {
		errno = EINVAL;
		return (-1);
	}

	switch (mode) {
	case LIO_WAIT:
		state = NOCHECK;
		break;
	case LIO_NOWAIT:
		state = CHECK;
		break;
	default:
		errno = EINVAL;
		return (-1);
	}

	for (i = 0; i < nent; i++) {
		if ((aiocbp = list[i]) == NULL)
			continue;
		if (_aio_hash_find(&aiocbp->aio_resultp) != NULL) {
			errno = EBUSY;
			return (-1);
		}
		if (_aio_sigev_thread64(aiocbp) != 0)
			return (-1);
		if (aiocbp->aio_lio_opcode == LIO_NOP)
			aiocbp->aio_state = NOCHECK;
		else {
			aiocbp->aio_state = state;
			if (KAIO_SUPPORTED(aiocbp->aio_fildes))
				do_kaio++;
			else
				aiocbp->aio_resultp.aio_errno = ENOTSUP;
		}
	}
	if (_aio_sigev_thread_init(sigevp) != 0)
		return (-1);

	if (do_kaio) {
		error = (int)_kaio(AIOLIO64, mode, list, nent, sigevp);
		if (error == 0)
			return (0);
		oerrno = errno;
	} else {
		oerrno = errno = ENOTSUP;
		error = -1;
	}

	if (error == -1 && errno == ENOTSUP) {
		error = errno = 0;
		/*
		 * If LIO_WAIT, or notification required, allocate a list head.
		 */
		if (mode == LIO_WAIT ||
		    (sigevp != NULL &&
		    (sigevp->sigev_notify == SIGEV_SIGNAL ||
		    sigevp->sigev_notify == SIGEV_THREAD ||
		    sigevp->sigev_notify == SIGEV_PORT)))
			head = _aio_lio_alloc();
		if (head) {
			sig_mutex_lock(&head->lio_mutex);
			head->lio_mode = mode;
			head->lio_largefile = 1;
			if (mode == LIO_NOWAIT && sigevp != NULL) {
				if (sigevp->sigev_notify == SIGEV_THREAD) {
					head->lio_port = sigevp->sigev_signo;
					head->lio_event = AIOLIO64;
					head->lio_sigevent = sigevp;
					head->lio_sigval.sival_ptr =
					    sigevp->sigev_value.sival_ptr;
				} else if (sigevp->sigev_notify == SIGEV_PORT) {
					port_notify_t *pn =
					    sigevp->sigev_value.sival_ptr;
					head->lio_port = pn->portnfy_port;
					head->lio_event = AIOLIO64;
					head->lio_sigevent = sigevp;
					head->lio_sigval.sival_ptr =
					    pn->portnfy_user;
				} else {	/* SIGEV_SIGNAL */
					head->lio_signo = sigevp->sigev_signo;
					head->lio_sigval.sival_ptr =
					    sigevp->sigev_value.sival_ptr;
				}
			}
			head->lio_nent = head->lio_refcnt = nent;
			sig_mutex_unlock(&head->lio_mutex);
		}
		/*
		 * find UFS requests, errno == ENOTSUP/EBADFD,
		 */
		for (i = 0; i < nent; i++) {
			if ((aiocbp = list[i]) == NULL ||
			    aiocbp->aio_lio_opcode == LIO_NOP ||
			    (aiocbp->aio_resultp.aio_errno != ENOTSUP &&
			    aiocbp->aio_resultp.aio_errno != EBADFD)) {
				if (head)
					_lio_list_decr(head);
				continue;
			}
			if (aiocbp->aio_resultp.aio_errno == EBADFD)
				SET_KAIO_NOT_SUPPORTED(aiocbp->aio_fildes);
			if (aiocbp->aio_reqprio != 0) {
				aiocbp->aio_resultp.aio_errno = EINVAL;
				aiocbp->aio_resultp.aio_return = -1;
				EIOflg = 1;
				if (head)
					_lio_list_decr(head);
				continue;
			}
			/*
			 * submit an AIO request with flags AIO_NO_KAIO
			 * to avoid the kaio() syscall in _aio_rw()
			 */
			switch (aiocbp->aio_lio_opcode) {
			case LIO_READ:
				rw = AIOAREAD64;
				break;
			case LIO_WRITE:
				rw = AIOAWRITE64;
				break;
			}
			error = _aio_rw64(aiocbp, head, &__nextworker_rw, rw,
			    (AIO_NO_KAIO | AIO_NO_DUPS));
			if (error == 0)
				aio_ufs++;
			else {
				if (head)
					_lio_list_decr(head);
				aiocbp->aio_resultp.aio_errno = error;
				EIOflg = 1;
			}
		}
	}
	if (EIOflg) {
		errno = EIO;
		return (-1);
	}
	if (mode == LIO_WAIT && oerrno == ENOTSUP) {
		/*
		 * call kaio(AIOLIOWAIT) to get all outstanding
		 * kernel AIO requests
		 */
		if ((nent - aio_ufs) > 0)
			(void) _kaio(AIOLIOWAIT, mode, list, nent, sigevp);
		if (head != NULL && head->lio_nent > 0) {
			sig_mutex_lock(&head->lio_mutex);
			while (head->lio_refcnt > 0) {
				int err;
				head->lio_waiting = 1;
				pthread_cleanup_push(_lio_listio_cleanup, head);
				err = sig_cond_wait(&head->lio_cond_cv,
				    &head->lio_mutex);
				pthread_cleanup_pop(0);
				head->lio_waiting = 0;
				if (err && head->lio_nent > 0) {
					sig_mutex_unlock(&head->lio_mutex);
					errno = err;
					return (-1);
				}
			}
			sig_mutex_unlock(&head->lio_mutex);
			ASSERT(head->lio_nent == 0 && head->lio_refcnt == 0);
			_aio_lio_free(head);
			for (i = 0; i < nent; i++) {
				if ((aiocbp = list[i]) != NULL &&
				    aiocbp->aio_resultp.aio_errno) {
					errno = EIO;
					return (-1);
				}
			}
		}
		return (0);
	}
	return (error);
}
int
aio_suspend64(const aiocb64_t * const list[], int nent,
	const timespec_t *timeout)
{
	return (__aio_suspend((void **)list, nent, timeout, 1));
}
int
aio_error64(const aiocb64_t *aiocbp)
{
	const aio_result_t *resultp = &aiocbp->aio_resultp;
	int error;

	if ((error = resultp->aio_errno) == EINPROGRESS) {
		if (aiocbp->aio_state == CHECK) {
			/*
			 * Always do the kaio() call without using the
			 * KAIO_SUPPORTED() checks because it is not
			 * mandatory to have a valid fd set in the
			 * aiocb, only the resultp must be set.
			 */
			if ((int)_kaio(AIOERROR64, aiocbp) == EINVAL) {
				errno = EINVAL;
				return (-1);
			}
			error = resultp->aio_errno;
		} else if (aiocbp->aio_state == CHECKED) {
			((aiocb64_t *)aiocbp)->aio_state = CHECK;
		}
	}
	return (error);
}
ssize_t
aio_return64(aiocb64_t *aiocbp)
{
	aio_result_t *resultp = &aiocbp->aio_resultp;
	aio_req_t *reqp;
	int error;
	ssize_t retval;

	/*
	 * The _aiodone() function stores resultp->aio_return before
	 * storing resultp->aio_errno (with a membar_producer() in
	 * between).  We use membar_consumer() below to ensure proper
	 * memory ordering between _aiodone() and ourselves.
	 */
	error = resultp->aio_errno;
	membar_consumer();
	retval = resultp->aio_return;

	/*
	 * We use this condition to indicate either that
	 * aio_return() has been called before or should
	 * not have been called yet.
	 */
	if ((retval == -1 && error == EINVAL) || error == EINPROGRESS) {
		errno = error;
		return (-1);
	}

	/*
	 * Before we return, mark the result as being returned so that later
	 * calls to aio_return() will return the fact that the result has
	 * already been returned.
	 */
	sig_mutex_lock(&__aio_mutex);
	/* retest, in case more than one thread actually got in here */
	if (resultp->aio_return == -1 && resultp->aio_errno == EINVAL) {
		sig_mutex_unlock(&__aio_mutex);
		errno = EINVAL;
		return (-1);
	}
	resultp->aio_return = -1;
	resultp->aio_errno = EINVAL;
	if ((reqp = _aio_hash_del(resultp)) == NULL)
		sig_mutex_unlock(&__aio_mutex);
	else {
		aiocbp->aio_state = NOCHECK;
		ASSERT(reqp->req_head == NULL);
		(void) _aio_req_remove(reqp);
		sig_mutex_unlock(&__aio_mutex);
		_aio_req_free(reqp);
	}

	if (retval == -1)
		errno = error;
	return (retval);
}
static int
__aio_fsync_bar64(aiocb64_t *aiocbp, aio_lio_t *head, aio_worker_t *aiowp,
    int workerscnt)
{
	int i;
	int error;
	aio_worker_t *next = aiowp;

	for (i = 0; i < workerscnt; i++) {
		error = _aio_rw64(aiocbp, head, &next, AIOFSYNC, AIO_NO_KAIO);
		if (error != 0) {
			sig_mutex_lock(&head->lio_mutex);
			head->lio_mode = LIO_DESTROY;	/* ignore fsync */
			head->lio_nent -= workerscnt - i;
			head->lio_refcnt -= workerscnt - i;
			sig_mutex_unlock(&head->lio_mutex);
			errno = EAGAIN;
			return (i);
		}
		next = next->work_forw;
	}

	return (i);
}
int
aio_fsync64(int op, aiocb64_t *aiocbp)
{
	aio_lio_t *head;
	struct stat64 statb;
	int fret;

	if (aiocbp == NULL)
		return (0);
	if (op != O_DSYNC && op != O_SYNC) {
		errno = EINVAL;
		return (-1);
	}
	if (_aio_hash_find(&aiocbp->aio_resultp) != NULL) {
		errno = EBUSY;
		return (-1);
	}
	if (fstat64(aiocbp->aio_fildes, &statb) < 0)
		return (-1);
	if (_aio_sigev_thread64(aiocbp) != 0)
		return (-1);

	/*
	 * Kernel aio_fsync() is not supported.
	 * We force user-level aio_fsync() just
	 * for the notification side-effect.
	 */
	if (!__uaio_ok && __uaio_init() == -1)
		return (-1);

	/*
	 * The first asynchronous I/O request in the current process will
	 * create a bunch of workers (via __uaio_init()).  If the number
	 * of workers is zero then the number of pending asynchronous I/O
	 * requests is zero.  In such a case only execute the standard
	 * fsync(3C) or fdatasync(3RT) as appropriate.
	 */
	if (__rw_workerscnt == 0) {
		if (op == O_DSYNC)
			return (__fdsync(aiocbp->aio_fildes, FDSYNC));
		else
			return (__fdsync(aiocbp->aio_fildes, FSYNC));
	}

	/*
	 * re-use aio_offset as the op field.
	 *	O_DSYNC - fdatasync()
	 *	O_SYNC - fsync()
	 */
	aiocbp->aio_offset = op;
	aiocbp->aio_lio_opcode = AIOFSYNC;

	/*
	 * Create a list of fsync requests.  The worker that
	 * gets the last request will do the fsync request.
	 */
	head = _aio_lio_alloc();
	if (head == NULL) {
		errno = EAGAIN;
		return (-1);
	}
	head->lio_mode = LIO_FSYNC;
	head->lio_nent = head->lio_refcnt = __rw_workerscnt;
	head->lio_largefile = 1;

	/*
	 * Insert an fsync request on every worker's queue.
	 */
	fret = __aio_fsync_bar64(aiocbp, head, __workers_rw, __rw_workerscnt);
	if (fret != __rw_workerscnt) {
		/*
		 * Fewer fsync requests than workers means that it was
		 * not possible to submit fsync requests to all workers.
		 * Actions:
		 * a) number of fsync requests submitted is 0:
		 *    => free allocated memory (aio_lio_t).
		 * b) number of fsync requests submitted is > 0:
		 *    => the last worker executing the fsync request
		 *	 will free the aio_lio_t struct.
		 */
		if (fret == 0)
			_aio_lio_free(head);
		return (-1);
	}
	return (0);
}
int
aio_cancel64(int fd, aiocb64_t *aiocbp)
{
	aio_req_t *reqp;
	aio_worker_t *aiowp;
	int done = 0;
	int canceled = 0;
	struct stat64 buf;

	if (fstat64(fd, &buf) < 0)
		return (-1);

	if (aiocbp != NULL) {
		if (fd != aiocbp->aio_fildes) {
			errno = EINVAL;
			return (-1);
		}
		if (aiocbp->aio_state == USERAIO) {
			sig_mutex_lock(&__aio_mutex);
			reqp = _aio_hash_find(&aiocbp->aio_resultp);
			if (reqp == NULL) {
				sig_mutex_unlock(&__aio_mutex);
				return (AIO_ALLDONE);
			}
			aiowp = reqp->req_worker;
			sig_mutex_lock(&aiowp->work_qlock1);
			(void) _aio_cancel_req(aiowp, reqp, &canceled, &done);
			sig_mutex_unlock(&aiowp->work_qlock1);
			sig_mutex_unlock(&__aio_mutex);
			if (done)
				return (AIO_ALLDONE);
			if (canceled)
				return (AIO_CANCELED);
			return (AIO_NOTCANCELED);
		}
		if (aiocbp->aio_state == USERAIO_DONE)
			return (AIO_ALLDONE);
		return ((int)_kaio(AIOCANCEL, fd, aiocbp));
	}

	return (aiocancel_all(fd));
}
int
aio_waitn64(aiocb64_t *list[], uint_t nent, uint_t *nwait,
	const timespec_t *timeout)
{
	return (__aio_waitn((void **)list, nent, nwait, timeout));
}
#endif	/* !defined(_LP64) */