/*
 * Copyright (C) 2024 Mikulas Patocka
 *
 * This file is part of Ajla.
 *
 * Ajla is free software: you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * Ajla. If not, see <https://www.gnu.org/licenses/>.
 */

#include "ajla.h"

#include "list.h"
#include "mem_al.h"
#include "thread.h"
#include "addrlock.h"
#include "rwlock.h"
#include "str.h"
#include "os.h"
#include "timer.h"
#include "obj_reg.h"

#include "iomux.h"

#ifdef IOMUX_EPOLL

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/poll.h>
#include <sys/epoll.h>
#ifdef IOMUX_EPOLL_INOTIFY
#include <sys/inotify.h>
#endif
#include <limits.h>

#ifndef NAME_MAX
#define NAME_MAX 255
#endif

#define EPOLL_MAX_EVENTS 64

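/*
 * epoll-based I/O multiplexing.
 *
 * Each file handle has one struct iomux_wait carrying the list of waiters
 * and the epoll_event registered for it. A single epoll instance (ep_fd)
 * watches all handles; when IOMUX_EPOLL_INOTIFY is defined, directory
 * change notification is layered on top of it via inotify.
 *
 * EINTR_LOOP is presumably a retry-on-EINTR wrapper defined elsewhere in
 * the tree, along these lines (a sketch, not the actual definition):
 *
 *	#define EINTR_LOOP(ret, call)				\
 *		do {						\
 *			(ret) = (call);				\
 *		} while ((ret) == -1 && errno == EINTR)
 */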
struct iomux_wait {
	struct list wait_list;
	struct epoll_event event;
};

static handle_t ep_fd;

#ifdef IOMUX_EPOLL_INOTIFY
static handle_t inotify_fd;
struct tree inotify_wds;
static mutex_t inotify_wds_mutex;

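/*
 * One refcounted entry per inotify watch descriptor, kept in a tree keyed
 * by wd. seq is bumped on every event so that waiters can detect changes
 * that happened before they went to sleep.
 */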
struct inotify_wd {
	struct tree_entry entry;
	struct list wait_list;
	int wd;
	uintptr_t refcount;
	uint64_t seq;
};
#endif

static void iomux_wait_init(struct iomux_wait *iow, handle_t handle)
{
	list_init(&iow->wait_list);
	iow->event.events = 0;
	iow->event.data.fd = handle;
}

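/*
 * iomux.inc presumably provides the pieces shared by all iomux backends:
 * the iowait_directory array, iomux_rwlock, iomux_get_iowait(),
 * iomux_get_time() and the polling-thread glue used below.
 */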
#include "iomux.inc"

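/*
 * Register a waiter for readability (wr == false) or writability
 * (wr == true) of a handle. With EPOLLONESHOT the kernel disarms the
 * handle after delivering one event; without it, iomux_check_all()
 * deletes the handle from the epoll set by hand. EPOLL_CTL_ADD is tried
 * first and EPOLL_CTL_MOD is used as a fallback when the handle is
 * already registered (EEXIST).
 */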
void iomux_register_wait(handle_t handle, bool wr, mutex_t **mutex_to_lock, struct list *list_entry)
{
	int r;
	struct iomux_wait *iow = iomux_get_iowait(handle);
	uint32_t event = !wr ? EPOLLIN : EPOLLOUT;

	address_lock(iow, DEPTH_THUNK);

#ifdef EPOLLONESHOT
	event |= EPOLLONESHOT;
#endif
	iow->event.events |= event;
	EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_ADD, handle, &iow->event));
	if (unlikely(r == -1)) {
		int er = errno;
		if (unlikely(er != EEXIST))
			fatal("epoll_ctl(EPOLL_CTL_ADD) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_MOD, handle, &iow->event));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("epoll_ctl(EPOLL_CTL_MOD) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
	}

	*mutex_to_lock = address_get_mutex(iow, DEPTH_THUNK);
	list_add(&iow->wait_list, list_entry);

	address_unlock(iow, DEPTH_THUNK);
}

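/*
 * Synchronously test whether a handle is ready, using poll() with a zero
 * timeout so the call never blocks. EAGAIN is retried because poll() may
 * fail transiently when the kernel cannot allocate internal resources.
 */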
bool iomux_test_handle(handle_t handle, bool wr)
{
	struct pollfd p;
	int r;
	p.fd = handle;
	p.events = !wr ? POLLIN : POLLOUT;
again:
	EINTR_LOOP(r, poll(&p, 1, 0));
	if (unlikely(r == -1)) {
		int er = errno;
		if (er == EAGAIN)
			goto again;
		fatal("poll failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}
	return !!r;
}

#ifdef IOMUX_EPOLL_INOTIFY

static int inotify_wd_compare(const struct tree_entry *e, uintptr_t v)
{
	struct inotify_wd *wd = get_struct(e, struct inotify_wd, entry);
	return wd->wd - (int)v;
}

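/*
 * Drain the (non-blocking) inotify descriptor and wake the waiters of
 * every watch that reported an event. The read buffer holds a sequence of
 * variable-length records: each struct inotify_event is followed by len
 * bytes of file name, so the next record starts at
 * sizeof(struct inotify_event) + ptr->len.
 */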
static void process_inotify(void)
{
	union {
		struct inotify_event ev;
		char alloc[sizeof(struct inotify_event) + NAME_MAX + 1];
	} buffer;
	int offset;
	int r;
	EINTR_LOOP(r, read(inotify_fd, &buffer, sizeof buffer));
	if (unlikely(r == -1)) {
		int er = errno;
		if (er == EAGAIN)
			return;
		fatal("inotify: read failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}

	offset = 0;
	while (offset < r) {
		struct inotify_event *ptr;
		struct tree_entry *e;

		if (unlikely(offset + (int)sizeof(struct inotify_event) > r)) {
			fatal("inotify: read returned partial buffer");
		}
		ptr = cast_ptr(struct inotify_event *, &buffer.alloc[offset]);
		if (unlikely(offset + (int)sizeof(struct inotify_event) + (int)ptr->len > r)) {
			fatal("inotify: file name overruns the buffer");
		}
		/*debug("read id: %d, %08x", ptr->wd, ptr->mask);*/

		mutex_lock(&inotify_wds_mutex);
		e = tree_find(&inotify_wds, inotify_wd_compare, ptr->wd);
		if (unlikely(!e)) {
			/*debug("not found");*/
			mutex_unlock(&inotify_wds_mutex);
		} else {
			struct inotify_wd *wd = get_struct(e, struct inotify_wd, entry);
			/*debug("found seq %llx", (unsigned long long)wd->seq);*/
			wd->seq++;
			call(wake_up_wait_list)(&wd->wait_list, &inotify_wds_mutex, true);
		}

		offset += sizeof(struct inotify_event) + ptr->len;
	}
}

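/*
 * Allocate (or share) an inotify watch for a directory. When real
 * directory handles are available, the handle is translated to a path via
 * /proc/self/fd/<fd>, which inotify_add_watch() can resolve. If the same
 * directory is already watched, inotify_add_watch() returns the existing
 * wd and only the refcount is bumped.
 */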
bool iomux_directory_handle_alloc(dir_handle_t handle, notify_handle_t *h, uint64_t *seq, ajla_error_t *err)
{
	int w;
	struct inotify_wd *wd;
	struct tree_entry *e;
	struct tree_insert_position ins;

#ifdef NO_DIR_HANDLES
	char *pathname = handle;
#else
	char pathname[14 + 10 + 1];
	sprintf(pathname, "/proc/self/fd/%d", handle);
#endif

	if (unlikely(inotify_fd == -1)) {
		fatal_mayfail(error_ajla(EC_SYNC, AJLA_ERROR_NOT_SUPPORTED), err, "directory monitoring not supported");
		return false;
	}

	mutex_lock(&inotify_wds_mutex);
	/* IN_MODIFY causes an infinite loop in the /dev directory */
	EINTR_LOOP(w, inotify_add_watch(inotify_fd, pathname, IN_ATTRIB | IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_DELETE_SELF | IN_MOVE_SELF | IN_MOVED_FROM | IN_MOVED_TO));
	/*debug("add watch: %d", w);*/
	if (unlikely(w == -1)) {
		ajla_error_t e = error_from_errno(EC_SYSCALL, errno);
		mutex_unlock(&inotify_wds_mutex);
		fatal_mayfail(e, err, "inotify_add_watch failed: %s", error_decode(e));
		return false;
	}

	e = tree_find_for_insert(&inotify_wds, inotify_wd_compare, w, &ins);
	if (!e) {
		wd = mem_alloc_mayfail(struct inotify_wd *, sizeof(struct inotify_wd), err);
		if (unlikely(!wd)) {
			int r;
			EINTR_LOOP(r, inotify_rm_watch(inotify_fd, w));
			/*debug("rm watch oom: %d", w);*/
			if (unlikely(r == -1) && errno != EINVAL) {
				int er = errno;
				fatal("inotify_rm_watch failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
			}
			mutex_unlock(&inotify_wds_mutex);
			return false;
		}
		list_init(&wd->wait_list);
		wd->wd = w;
		wd->refcount = 1;
		wd->seq = 0;
		tree_insert_after_find(&wd->entry, &ins);
	} else {
		wd = get_struct(e, struct inotify_wd, entry);
		wd->refcount++;
	}

	*h = wd;
	*seq = wd->seq;

	mutex_unlock(&inotify_wds_mutex);
	return true;
}

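/*
 * Queue a waiter on a directory watch. If the sequence number changed
 * since the caller sampled it in iomux_directory_handle_alloc(), an event
 * already happened and the caller must not sleep: true is returned
 * instead of enqueueing.
 */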
bool iomux_directory_handle_wait(notify_handle_t w, uint64_t seq, mutex_t **mutex_to_lock, struct list *list_entry)
{
	struct inotify_wd *wd = w;

	mutex_lock(&inotify_wds_mutex);
	if (wd->seq != seq) {
		mutex_unlock(&inotify_wds_mutex);
		return true;
	}
	*mutex_to_lock = &inotify_wds_mutex;
	list_add(&wd->wait_list, list_entry);
	mutex_unlock(&inotify_wds_mutex);
	return false;
}

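/*
 * Drop a reference to a directory watch; the last reference removes the
 * inotify watch and frees the tree entry. EINVAL from inotify_rm_watch()
 * is tolerated, presumably because the kernel may have already removed
 * the watch on its own (e.g. when the watched directory was deleted).
 */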
void iomux_directory_handle_free(notify_handle_t w)
{
	struct inotify_wd *wd = w;

	mutex_lock(&inotify_wds_mutex);
	if (!--wd->refcount) {
		int r;
		EINTR_LOOP(r, inotify_rm_watch(inotify_fd, wd->wd));
		/*debug("rm watch: %d", wd->wd);*/
		if (unlikely(r == -1) && errno != EINVAL) {
			int er = errno;
			fatal("inotify_rm_watch failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
		tree_delete(&wd->entry);
		mem_free(wd);
	}
	mutex_unlock(&inotify_wds_mutex);
}

#endif

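/*
 * One pass of the event loop: wait up to us microseconds (rounded up to
 * whole milliseconds, or forever for IOMUX_INDEFINITE_WAIT), then wake
 * the waiters of every ready handle. The notify pipe and the inotify
 * descriptor are special-cased. Without EPOLLONESHOT, the handle has to
 * be deleted from the epoll set manually to get one-shot semantics.
 */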
void iomux_check_all(uint32_t us)
{
	struct epoll_event events[EPOLL_MAX_EVENTS];
	int n_ev, i;
	int ms;

	us = iomux_get_time(us);
	/*debug("iomux_check_all: %u", us);
	us = minimum(us, 1000000);*/

	if (us != IOMUX_INDEFINITE_WAIT)
		ms = (us + 999) / 1000;
	else
		ms = -1;

	n_ev = epoll_wait(ep_fd, events, EPOLL_MAX_EVENTS, ms);
	if (n_ev == -1) {
		int er;
		if (likely(errno == EINTR))
			goto no_events;
		er = errno;
		fatal("epoll_wait failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}

	rwlock_lock_read(&iomux_rwlock);
	for (i = 0; i < n_ev; i++) {
#ifndef EPOLLONESHOT
		int r;
#endif
		handle_t handle = events[i].data.fd;
		struct iomux_wait *iow;
		if (handle == os_notify_pipe[0]) {
#ifdef THREAD_NONE
			os_drain_notify_pipe();
#endif
			continue;
		}
#ifdef IOMUX_EPOLL_INOTIFY
		if (handle == inotify_fd) {
			process_inotify();
			continue;
		}
#endif
		iow = iowait_directory[handle];

		address_lock(iow, DEPTH_THUNK);
#ifndef EPOLLONESHOT
		EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_DEL, handle, &events[i]));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("epoll_ctl(EPOLL_CTL_DEL) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
#endif
		iow->event.events = 0;
		call(wake_up_wait_list)(&iow->wait_list, address_get_mutex(iow, DEPTH_THUNK), true);
	}
	rwlock_unlock_read(&iomux_rwlock);

no_events:;
}

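/*
 * Start-up: create the epoll instance, try to set up inotify (directory
 * monitoring is simply unavailable if inotify_init() fails), register the
 * notify pipe, and spawn the polling thread unless threads are disabled.
 */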
void iomux_init(void)
{
	struct epoll_event pipe_ev;
	int r;
	rwlock_init(&iomux_rwlock);
	array_init(struct iomux_wait *, &iowait_directory, &iowait_directory_size);

	EINTR_LOOP(ep_fd, epoll_create(EPOLL_MAX_EVENTS));
	if (unlikely(ep_fd == -1)) {
		int er = errno;
		fatal("epoll_create failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}
	os_set_cloexec(ep_fd);
	obj_registry_insert(OBJ_TYPE_HANDLE, ep_fd, file_line);

#ifdef IOMUX_EPOLL_INOTIFY
	tree_init(&inotify_wds);
	mutex_init(&inotify_wds_mutex);
	EINTR_LOOP(inotify_fd, inotify_init());
	if (likely(inotify_fd != -1)) {
		os_set_cloexec(inotify_fd);
		obj_registry_insert(OBJ_TYPE_HANDLE, inotify_fd, file_line);
		EINTR_LOOP(r, fcntl(inotify_fd, F_SETFL, O_NONBLOCK));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("fcntl(F_SETFL, O_NONBLOCK) on an inotify descriptor failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
		pipe_ev.events = EPOLLIN;
		pipe_ev.data.fd = inotify_fd;
		EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_ADD, inotify_fd, &pipe_ev));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("epoll_ctl(EPOLL_CTL_ADD) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
	}
#endif

	pipe_ev.events = EPOLLIN;
	pipe_ev.data.fd = os_notify_pipe[0];
	EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_ADD, os_notify_pipe[0], &pipe_ev));
	if (unlikely(r == -1)) {
		int er = errno;
		fatal("epoll_ctl(EPOLL_CTL_ADD) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}
#ifndef THREAD_NONE
	thread_spawn(&iomux_thread, iomux_poll_thread, NULL, PRIORITY_IO, NULL);
#endif
}

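/*
 * Tear-down in reverse order: stop the polling thread via the notify
 * pipe, unregister and close the descriptors, and free the per-handle
 * wait structures. The inotify tree must be empty by now, otherwise a
 * watch was leaked.
 */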
void iomux_done(void)
{
	struct epoll_event pipe_ev;
	int r;
	size_t h;
	os_shutdown_notify_pipe();
#ifndef THREAD_NONE
	thread_join(&iomux_thread);
#endif
	EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_DEL, os_notify_pipe[0], &pipe_ev));
	if (unlikely(r == -1)) {
		int er = errno;
		fatal("epoll_ctl(EPOLL_CTL_DEL) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
	}
#ifdef IOMUX_EPOLL_INOTIFY
	if (likely(inotify_fd != -1)) {
		EINTR_LOOP(r, epoll_ctl(ep_fd, EPOLL_CTL_DEL, inotify_fd, &pipe_ev));
		if (unlikely(r == -1)) {
			int er = errno;
			fatal("epoll_ctl(EPOLL_CTL_DEL) failed: %d, %s", er, error_decode(error_from_errno(EC_SYSCALL, er)));
		}
		os_close(inotify_fd);
	}
	if (unlikely(!tree_is_empty(&inotify_wds)))
		internal(file_line, "iomux_done: inotify tree is not empty");
	mutex_done(&inotify_wds_mutex);
#endif

	os_close(ep_fd);

	for (h = 0; h < iowait_directory_size; h++)
		if (iowait_directory[h])
			mem_free(iowait_directory[h]);
	mem_free(iowait_directory);
	rwlock_done(&iomux_rwlock);
}

#endif