/*
 * Copyright (C) 2024 Mikulas Patocka
 *
 * This file is part of Ajla.
 *
 * Ajla is free software: you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * Ajla. If not, see <https://www.gnu.org/licenses/>.
 */
#include <sys/event.h>

#define KQUEUE_MAX_EVENTS	64

struct iomux_wait {
	struct list wait_list[3];
	uint64_t seq;
	handle_t self;
};

static handle_t kq_fd;
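
/*
 * Per-handle wait state: wait_list[0] holds waiters for readability,
 * wait_list[1] waiters for writability and wait_list[2] waiters for
 * directory (vnode) notifications; seq counts observed directory changes.
 */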
static void iomux_wait_init(struct iomux_wait *iow, handle_t handle)
{
	list_init(&iow->wait_list[0]);
	list_init(&iow->wait_list[1]);
	list_init(&iow->wait_list[2]);
	iow->seq = 0;
	iow->self = handle;
}
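
/*
 * Register list_entry to be woken up when the handle becomes readable
 * (wr == false) or writable (wr == true). The filter is added with
 * EV_ONESHOT, so it fires once and must be re-registered for the next
 * wait; *mutex_to_lock receives the address lock protecting the list.
 */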
void iomux_register_wait(handle_t handle, bool wr, mutex_t **mutex_to_lock, struct list *list_entry)
{
	struct iomux_wait *iow = iomux_get_iowait(handle);
	struct kevent event;
	int r;

	EV_SET(&event, handle, !wr ? EVFILT_READ : EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0, 0);

	address_lock(iow, DEPTH_THUNK);

	EINTR_LOOP(r, kevent(kq_fd, &event, 1, NULL, 0, NULL));
	if (unlikely(r == -1)) {
		int er = errno;
		fatal("kevent failed adding a new handle: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}

	*mutex_to_lock = address_get_mutex(iow, DEPTH_THUNK);
	list_add(&iow->wait_list[(int)wr], list_entry);

	address_unlock(iow, DEPTH_THUNK);
}
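
/*
 * kqueue cannot monitor some handle types on OS X (ttys in particular),
 * so threaded Apple builds test readiness directly with poll().
 */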
#if !defined(THREAD_NONE) && defined(__APPLE__)
bool iomux_test_handle(handle_t handle, bool wr)
{
	struct pollfd p;
	int r;

	p.fd = handle;
	p.events = !wr ? POLLIN : POLLOUT;
	EINTR_LOOP(r, poll(&p, 1, 0));
	if (unlikely(r == -1)) {
		int er = errno;
		fatal("poll failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}
	return !!r;
}
#endif
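
/*
 * Allocate a directory-change watch: the directory handle is dup()ed and
 * an EVFILT_VNODE filter watching NOTE_WRITE is attached to the duplicate.
 * On success, *h receives the wait structure and *seq a snapshot of its
 * change counter.
 */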
bool iomux_directory_handle_alloc(dir_handle_t attr_unused handle, notify_handle_t attr_unused *h, uint64_t attr_unused *seq, ajla_error_t *err)
{
#ifndef NO_DIR_HANDLES
	handle_t newfd;
	struct kevent event;
	struct iomux_wait *iow;
	int r;

	EINTR_LOOP(newfd, dup(handle));
	if (unlikely(newfd == -1)) {
		ajla_error_t e = error_from_errno(EC_SYSCALL, errno);
		fatal_mayfail(e, err, "dup failed: %s", error_decode(e));
		return false;
	}

	iow = iomux_get_iowait(newfd);

	EV_SET(&event, newfd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, NOTE_WRITE, 0, 0);

	address_lock(iow, DEPTH_THUNK);

	EINTR_LOOP(r, kevent(kq_fd, &event, 1, NULL, 0, NULL));
	if (unlikely(r == -1)) {
		ajla_error_t e = error_from_errno(EC_SYSCALL, errno);
		address_unlock(iow, DEPTH_THUNK);
		os_close_handle(newfd);
		fatal_mayfail(e, err, "adding a directory watch failed: %s", error_decode(e));
		return false;
	}

	*h = iow;
	*seq = iow->seq;

	address_unlock(iow, DEPTH_THUNK);
#if !defined(THREAD_NONE) && defined(__APPLE__)
	/* wake the polling thread so that it re-enters kevent() with the new filter armed */
	os_notify();
#endif
	return true;
#else
	fatal_mayfail(error_ajla(EC_SYNC, AJLA_ERROR_NOT_SUPPORTED), err, "directory monitoring not supported");
	return false;
#endif
}
bool iomux_directory_handle_wait(notify_handle_t h, uint64_t seq, mutex_t **mutex_to_lock, struct list *list_entry)
{
	struct iomux_wait *iow = h;

	address_lock(iow, DEPTH_THUNK);
	if (iow->seq != seq) {
		address_unlock(iow, DEPTH_THUNK);
		return false;
	}

	*mutex_to_lock = address_get_mutex(iow, DEPTH_THUNK);
	list_add(&iow->wait_list[2], list_entry);
	address_unlock(iow, DEPTH_THUNK);
	return true;
}
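
/*
 * Drop a directory watch; closing the dup()ed handle also removes any
 * kqueue filters that reference it.
 */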
void iomux_directory_handle_free(notify_handle_t h)
{
	struct iomux_wait *iow = h;

	os_close_handle(iow->self);
}
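
/*
 * Collect pending kqueue events, waiting at most us microseconds
 * (IOMUX_INDEFINITE_WAIT blocks until an event arrives), and wake the
 * corresponding wait lists.
 */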
void iomux_check_all(uint32_t us)
{
	struct kevent events[KQUEUE_MAX_EVENTS];
	struct timespec ts;
	int n_ev, i;

	us = iomux_get_time(us);

	if (us != IOMUX_INDEFINITE_WAIT) {
		ts.tv_sec = us / 1000000;
		ts.tv_nsec = us % 1000000 * 1000;
	}

	n_ev = kevent(kq_fd, NULL, 0, events, KQUEUE_MAX_EVENTS, us != IOMUX_INDEFINITE_WAIT ? &ts : NULL);
	if (unlikely(n_ev == -1)) {
		int er = errno;
		if (likely(er == EINTR))
			return;
		fatal("kevent failed getting events: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}

	rwlock_lock_read(&iomux_rwlock);
	for (i = 0; i < n_ev; i++) {
		struct kevent *ev = &events[i];
		handle_t handle = ev->ident;
		struct iomux_wait *iow;
		if (handle == os_notify_pipe[0]) {
			os_drain_notify_pipe();
			continue;
		}
		iow = iowait_directory[handle];
		/*debug("handle: %d, iow: %p", handle, iow);*/
		address_lock(iow, DEPTH_THUNK);
		if (ev->filter == EVFILT_READ) {
			call(wake_up_wait_list)(&iow->wait_list[0], address_get_mutex(iow, DEPTH_THUNK), true);
		} else if (ev->filter == EVFILT_WRITE) {
			call(wake_up_wait_list)(&iow->wait_list[1], address_get_mutex(iow, DEPTH_THUNK), true);
		} else if (ev->filter == EVFILT_VNODE) {
			iow->seq++;
			call(wake_up_wait_list)(&iow->wait_list[2], address_get_mutex(iow, DEPTH_THUNK), true);
		} else {
			fatal("kevent returned unknown event %d", ev->filter);
		}
	}
	rwlock_unlock_read(&iomux_rwlock);
}
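
/*
 * Create the kqueue, register the read end of the notify pipe so that
 * other threads can interrupt a blocking kevent() call, and start the
 * polling thread.
 */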
void iomux_init(void)
{
	struct kevent pipe_ev;
	int r;

	rwlock_init(&iomux_rwlock);
	array_init(struct iomux_wait *, &iowait_directory, &iowait_directory_size);

	EINTR_LOOP(kq_fd, kqueue());
	if (unlikely(kq_fd == -1)) {
		int er = errno;
		fatal("kqueue failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}
	os_set_cloexec(kq_fd);
	obj_registry_insert(OBJ_TYPE_HANDLE, kq_fd, file_line);

	EV_SET(&pipe_ev, os_notify_pipe[0], EVFILT_READ, EV_ADD, 0, 0, 0);
	EINTR_LOOP(r, kevent(kq_fd, &pipe_ev, 1, NULL, 0, NULL));
	if (unlikely(r == -1)) {
		int er = errno;
		fatal("kevent failed adding notify pipe: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}

#ifndef THREAD_NONE
	thread_spawn(&iomux_thread, iomux_poll_thread, NULL, PRIORITY_IO, NULL);
#endif
}
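
/*
 * Tear down the I/O multiplexer: stop the polling thread, remove the
 * notify pipe filter, close the kqueue and free the per-handle wait
 * structures.
 */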
void iomux_done(void)
{
	struct kevent pipe_ev;
	int r;
	handle_t h;

	os_shutdown_notify_pipe();
#ifndef THREAD_NONE
	thread_join(&iomux_thread);
#endif

	EV_SET(&pipe_ev, os_notify_pipe[0], EVFILT_READ, EV_DELETE, 0, 0, 0);
	EINTR_LOOP(r, kevent(kq_fd, &pipe_ev, 1, NULL, 0, NULL));
	if (unlikely(r == -1)) {
		int er = errno;
		fatal("kevent failed removing notify pipe: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}

	os_close_handle(kq_fd);

	for (h = 0; h < iowait_directory_size; h++)
		if (iowait_directory[h])
			mem_free(iowait_directory[h]);
	mem_free(iowait_directory);
	rwlock_done(&iomux_rwlock);
}