/*
 * Copyright (C) 2024 Mikulas Patocka
 *
 * This file is part of Ajla.
 *
 * Ajla is free software: you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * Ajla. If not, see <https://www.gnu.org/licenses/>.
 */
#include <sys/epoll.h>
#ifdef IOMUX_EPOLL_INOTIFY
#include <sys/inotify.h>
#endif

#define EPOLL_MAX_EVENTS	64
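
/*
 * Per-handle wait record. Waiters queue on wait_list; the cached epoll_event
 * remembers which events are currently registered with the epoll instance
 * and carries the file descriptor back from epoll_wait().
 */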
struct iomux_wait {
	struct list wait_list;
	struct epoll_event event;
};

static handle_t ep_fd;
#ifdef IOMUX_EPOLL_INOTIFY
static handle_t inotify_fd;
static struct tree inotify_wds;
static mutex_t inotify_wds_mutex;

struct inotify_wd {
	struct tree_entry entry;
	struct list wait_list;
	uint64_t seq;		/* field reconstructed from its uses below */
	int wd;			/* field reconstructed from its uses below */
	unsigned refcount;	/* field reconstructed from its uses below */
};
#endif
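
/* Set up a freshly allocated wait record for the given file handle. */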
static void iomux_wait_init(struct iomux_wait *iow, handle_t handle)
{
	list_init(&iow->wait_list);
	iow->event.events = 0;
	iow->event.data.fd = handle;
}
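
/*
 * Register a waiter for readability (wr == false) or writability (wr == true)
 * of a handle. The descriptor is armed with EPOLLONESHOT, so it is reported
 * once and then disarmed until re-registered; epoll_ctl() is first tried with
 * EPOLL_CTL_ADD and falls back to EPOLL_CTL_MOD when the descriptor is
 * already in the epoll set (EEXIST). EINTR_LOOP retries the syscall on EINTR.
 */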
void iomux_register_wait(handle_t handle, bool wr, mutex_t **mutex_to_lock, struct list *list_entry)
{
	int r;
	struct iomux_wait *iow = iomux_get_iowait(handle);
	uint32_t event = !wr ? EPOLLIN : EPOLLOUT;

	address_lock(iow, DEPTH_THUNK);

	event |= EPOLLONESHOT;

	iow->event.events |= event;
	EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_ADD, handle, &iow->event));
	if (unlikely(r == -1)) {
		int er = errno;
		if (unlikely(er != EEXIST))
			fatal("epoll_ctl(EPOLL_CTL_ADD) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_MOD, handle, &iow->event));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("epoll_ctl(EPOLL_CTL_MOD) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
	}

	*mutex_to_lock = address_get_mutex(iow, DEPTH_THUNK);
	list_add(&iow->wait_list, list_entry);

	address_unlock(iow, DEPTH_THUNK);
}
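
/*
 * Synchronously test whether a handle is ready, without sleeping: poll() the
 * single descriptor with a zero timeout and report whether any event fired.
 */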
bool iomux_test_handle(handle_t handle, bool wr)
{
	struct pollfd p;
	int r;
	p.fd = handle;
	p.events = !wr ? POLLIN : POLLOUT;
	EINTR_LOOP(r, poll(&p, 1, 0));
	if (unlikely(r == -1)) {
		int er = errno;
		fatal("poll failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}
	return !!r;
}

#ifdef IOMUX_EPOLL_INOTIFY
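
/* Ordering for the tree of inotify watches: compare watch descriptors as plain ints. */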
static int inotify_wd_compare(const struct tree_entry *e, uintptr_t v)
{
	struct inotify_wd *wd = get_struct(e, struct inotify_wd, entry);
	return wd->wd - (int)v;
}
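
/*
 * Drain the (non-blocking) inotify descriptor and wake up the waiters of
 * every watch descriptor that reported an event. The buffer is a union so
 * that it is large enough for one event with a maximum-length file name and
 * properly aligned for struct inotify_event.
 */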
static void process_inotify(void)
{
	union {
		struct inotify_event ev;
		char alloc[sizeof(struct inotify_event) + NAME_MAX + 1];
	} buffer;
	int offset;
	int r;
	EINTR_LOOP(r, read(inotify_fd, &buffer, sizeof buffer));
	if (unlikely(r == -1)) {
		int er = errno;
		if (er == EAGAIN)
			return;
		fatal("inotify: read failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}
	offset = 0;
	while (offset < r) {
		struct inotify_event *ptr;
		struct tree_entry *e;

		if (unlikely(offset + (int)sizeof(struct inotify_event) > r)) {
			fatal("inotify: read returned partial buffer");
		}
		ptr = cast_ptr(struct inotify_event *, &buffer.alloc[offset]);
		if (unlikely(offset + (int)sizeof(struct inotify_event) + (int)ptr->len > r)) {
			fatal("inotify: file name overruns the buffer");
		}
		/*debug("read id: %d, %08x", ptr->wd, ptr->mask);*/

		mutex_lock(&inotify_wds_mutex);
		e = tree_find(&inotify_wds, inotify_wd_compare, ptr->wd);
		if (unlikely(!e)) {
			/*debug("not found");*/
			mutex_unlock(&inotify_wds_mutex);
		} else {
			struct inotify_wd *wd = get_struct(e, struct inotify_wd, entry);
			/*debug("found seq %llx", (unsigned long long)wd->seq);*/
			wd->seq++;
			call(wake_up_wait_list)(&wd->wait_list, &inotify_wds_mutex, true);
		}

		offset += sizeof(struct inotify_event) + ptr->len;
	}
}
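
/*
 * Start monitoring a directory: add an inotify watch for it and enter the
 * watch into the tree, sharing one reference-counted struct inotify_wd among
 * all callers that map to the same watch descriptor. On success, the opaque
 * handle is returned in *h and the current sequence number in *seq.
 */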
bool iomux_directory_handle_alloc(dir_handle_t handle, notify_handle_t *h, uint64_t *seq, ajla_error_t *err)
{
	int w;
	struct inotify_wd *wd;
	struct tree_entry *e;
	struct tree_insert_position ins;

#ifdef NO_DIR_HANDLES
	char *pathname = handle;
#else
	char pathname[14 + 10 + 1];
	sprintf(pathname, "/proc/self/fd/%d", handle);
#endif

	if (unlikely(inotify_fd == -1)) {
		fatal_mayfail(error_ajla(EC_SYNC, AJLA_ERROR_NOT_SUPPORTED), err, "directory monitoring not supported");
		return false;
	}

	mutex_lock(&inotify_wds_mutex);
	/* IN_MODIFY causes an infinite loop in the /dev directory */
	EINTR_LOOP(w, inotify_add_watch(inotify_fd, pathname, IN_ATTRIB | IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_DELETE_SELF | IN_MOVE_SELF | IN_MOVED_FROM | IN_MOVED_TO));
	/*debug("add watch: %d", w);*/
	if (unlikely(w == -1)) {
		ajla_error_t e = error_from_errno(EC_SYSCALL, errno);
		mutex_unlock(&inotify_wds_mutex);
		fatal_mayfail(e, err, "inotify_add_watch failed: %s", error_decode(e));
		return false;
	}

	e = tree_find_for_insert(&inotify_wds, inotify_wd_compare, w, &ins);
	if (!e) {
		wd = mem_alloc_mayfail(struct inotify_wd *, sizeof(struct inotify_wd), err);
		if (unlikely(!wd)) {
			int r;
			EINTR_LOOP(r, inotify_rm_watch(inotify_fd, w));
			/*debug("rm watch oom: %d", w);*/
			if (unlikely(r == -1) && errno != EINVAL) {
				int er = errno;
				fatal("inotify_rm_watch failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
			}
			mutex_unlock(&inotify_wds_mutex);
			return false;
		}
		list_init(&wd->wait_list);
		wd->wd = w;
		wd->refcount = 1;
		wd->seq = 0;
		tree_insert_after_find(&wd->entry, &ins);
	} else {
		wd = get_struct(e, struct inotify_wd, entry);
		wd->refcount++;
	}

	*h = wd;
	*seq = wd->seq;

	mutex_unlock(&inotify_wds_mutex);

	return true;
}
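
/*
 * Wait for a directory change: if the sequence number already differs from
 * the one the caller sampled, the change has happened and is reported
 * immediately; otherwise the caller is queued on the watch's wait list,
 * to be woken by process_inotify().
 */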
bool iomux_directory_handle_wait(notify_handle_t w, uint64_t seq, mutex_t **mutex_to_lock, struct list *list_entry)
{
	struct inotify_wd *wd = w;

	mutex_lock(&inotify_wds_mutex);
	if (wd->seq != seq) {
		mutex_unlock(&inotify_wds_mutex);
		return true;
	}
	*mutex_to_lock = &inotify_wds_mutex;
	list_add(&wd->wait_list, list_entry);
	mutex_unlock(&inotify_wds_mutex);
	return false;
}
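
/*
 * Drop one reference to a watch; the last reference removes the inotify
 * watch (EINVAL is tolerated, e.g. when the kernel already dropped the
 * watch) and deletes the tree node.
 */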
void iomux_directory_handle_free(notify_handle_t w)
{
	struct inotify_wd *wd = w;

	mutex_lock(&inotify_wds_mutex);
	if (!--wd->refcount) {
		int r;
		EINTR_LOOP(r, inotify_rm_watch(inotify_fd, wd->wd));
		/*debug("rm watch: %d", wd->wd);*/
		if (unlikely(r == -1) && errno != EINVAL) {
			int er = errno;
			fatal("inotify_rm_watch failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
		tree_delete(&wd->entry);
		mem_free(wd);
	}
	mutex_unlock(&inotify_wds_mutex);
}

#endif
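
/*
 * One iteration of the poll loop: wait up to `us` microseconds (rounded up
 * to whole milliseconds; IOMUX_INDEFINITE_WAIT waits forever) for events,
 * then dispatch them: the notify pipe is drained, inotify events are
 * processed, and ordinary handles are removed from the epoll set and their
 * wait lists woken.
 */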
void iomux_check_all(uint32_t us)
{
	struct epoll_event events[EPOLL_MAX_EVENTS];
	int ms;
	int n_ev, i, r;

	us = iomux_get_time(us);
	/*debug("iomux_check_all: %u", us);
	us = minimum(us, 1000000);*/

	if (us != IOMUX_INDEFINITE_WAIT)
		ms = (us + 999) / 1000;
	else
		ms = -1;

	n_ev = epoll_wait(ep_fd, events, EPOLL_MAX_EVENTS, ms);
	if (unlikely(n_ev == -1)) {
		int er;
		if (likely(errno == EINTR))
			return;
		er = errno;
		fatal("epoll_wait failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}

	rwlock_lock_read(&iomux_rwlock);
	for (i = 0; i < n_ev; i++) {
		handle_t handle = events[i].data.fd;
		struct iomux_wait *iow;
		if (handle == os_notify_pipe[0]) {
			os_drain_notify_pipe();
			continue;
		}
#ifdef IOMUX_EPOLL_INOTIFY
		if (handle == inotify_fd) {
			process_inotify();
			continue;
		}
#endif
		iow = iowait_directory[handle];

		address_lock(iow, DEPTH_THUNK);
		EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_DEL, handle, &events[i]));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("epoll_ctl(EPOLL_CTL_DEL) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
		iow->event.events = 0;
		call(wake_up_wait_list)(&iow->wait_list, address_get_mutex(iow, DEPTH_THUNK), true);
	}
	rwlock_unlock_read(&iomux_rwlock);
}
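
/*
 * Create the epoll instance (and, if available, the inotify instance),
 * register the notify pipe and the inotify descriptor with it, and spawn
 * the I/O polling thread.
 */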
void iomux_init(void)
{
	struct epoll_event pipe_ev;
	int r;

	rwlock_init(&iomux_rwlock);
	array_init(struct iomux_wait *, &iowait_directory, &iowait_directory_size);

	EINTR_LOOP(ep_fd, epoll_create(EPOLL_MAX_EVENTS));
	if (unlikely(ep_fd == -1)) {
		int er = errno;
		fatal("epoll_create failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}
	os_set_cloexec(ep_fd);
	obj_registry_insert(OBJ_TYPE_HANDLE, ep_fd, file_line);

#ifdef IOMUX_EPOLL_INOTIFY
	tree_init(&inotify_wds);
	mutex_init(&inotify_wds_mutex);
	EINTR_LOOP(inotify_fd, inotify_init());
	if (likely(inotify_fd != -1)) {
		os_set_cloexec(inotify_fd);
		obj_registry_insert(OBJ_TYPE_HANDLE, inotify_fd, file_line);
		EINTR_LOOP(r, fcntl(inotify_fd, F_SETFL, O_NONBLOCK));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("fcntl(F_SETFL, O_NONBLOCK) on an inotify descriptor failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
		pipe_ev.events = EPOLLIN;
		pipe_ev.data.fd = inotify_fd;
		EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_ADD, inotify_fd, &pipe_ev));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("epoll_ctl(EPOLL_CTL_ADD) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
	}
#endif

	pipe_ev.events = EPOLLIN;
	pipe_ev.data.fd = os_notify_pipe[0];
	EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_ADD, os_notify_pipe[0], &pipe_ev));
	if (unlikely(r == -1)) {
		int er = errno;
		fatal("epoll_ctl(EPOLL_CTL_ADD) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}

	thread_spawn(&iomux_thread, iomux_poll_thread, NULL, PRIORITY_IO, NULL);
}
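
/*
 * Tear down the poller: stop the polling thread via the notify pipe, detach
 * the permanent descriptors from the epoll set, and release all per-handle
 * wait records.
 */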
void iomux_done(void)
{
	struct epoll_event pipe_ev;
	int r;
	size_t h;

	os_shutdown_notify_pipe();

	thread_join(&iomux_thread);

	EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_DEL, os_notify_pipe[0], &pipe_ev));
	if (unlikely(r == -1)) {
		int er = errno;
		fatal("epoll_ctl(EPOLL_CTL_DEL) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}

#ifdef IOMUX_EPOLL_INOTIFY
	if (likely(inotify_fd != -1)) {
		EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_DEL, inotify_fd, &pipe_ev));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("epoll_ctl(EPOLL_CTL_DEL) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
		os_close(inotify_fd);
	}
	if (unlikely(!tree_is_empty(&inotify_wds)))
		internal(file_line, "iomux_done: inotify tree is not empty");
	mutex_done(&inotify_wds_mutex);
#endif

	for (h = 0; h < iowait_directory_size; h++)
		if (iowait_directory[h])
			mem_free(iowait_directory[h]);
	mem_free(iowait_directory);
	rwlock_done(&iomux_rwlock);
}