external/gpl2/lvm2/dist/daemons/dmeventd/dmeventd.c
1 /* $NetBSD$ */
3 /*
4 * Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
6 * This file is part of the device-mapper userspace tools.
8 * This copyrighted material is made available to anyone wishing to use,
9 * modify, copy, or redistribute it subject to the terms and conditions
10 * of the GNU Lesser General Public License v.2.1.
12 * You should have received a copy of the GNU Lesser General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 * dmeventd - dm event daemon to monitor active mapped devices
21 #define _GNU_SOURCE
22 #define _FILE_OFFSET_BITS 64
24 #include "configure.h"
25 #include "libdevmapper.h"
26 #include "libdevmapper-event.h"
27 #include "dmeventd.h"
28 //#include "libmultilog.h"
29 #include "dm-logging.h"
31 #include <dlfcn.h>
32 #include <errno.h>
33 #include <pthread.h>
34 #include <sys/file.h>
35 #include <sys/stat.h>
36 #include <sys/wait.h>
37 #include <sys/time.h>
38 #include <sys/resource.h>
39 #include <unistd.h>
40 #include <signal.h>
41 #include <arpa/inet.h> /* for htonl, ntohl */
43 #ifdef linux
44 # include <malloc.h>
46 # define OOM_ADJ_FILE "/proc/self/oom_adj"
48 /* From linux/oom.h */
49 # define OOM_DISABLE (-17)
50 # define OOM_ADJUST_MIN (-16)
52 #endif
54 /* FIXME We use syslog for now, because multilog is not yet implemented */
55 #include <syslog.h>
57 static volatile sig_atomic_t _exit_now = 0; /* set to '1' when signal is given to exit */
58 static volatile sig_atomic_t _thread_registries_empty = 1; /* registries are empty initially */
59 static int _debug = 0;
61 /* List (un)link macros. */
62 #define LINK(x, head) dm_list_add(head, &(x)->list)
63 #define LINK_DSO(dso) LINK(dso, &_dso_registry)
64 #define LINK_THREAD(thread) LINK(thread, &_thread_registry)
66 #define UNLINK(x) dm_list_del(&(x)->list)
67 #define UNLINK_DSO(x) UNLINK(x)
68 #define UNLINK_THREAD(x) UNLINK(x)
70 #define DAEMON_NAME "dmeventd"
73 Global mutex for thread list access. Has to be held when:
74 - iterating thread list
75 - adding or removing elements from thread list
76 - changing or reading thread_status's fields:
77 processing, status, events
78 Use _lock_mutex() and _unlock_mutex() to hold/release it
80 static pthread_mutex_t _global_mutex;
83 There are three states a thread can attain (see struct
84 thread_status, field int status):
86 - DM_THREAD_RUNNING: thread has started up and is either working or
87 waiting for events... transitions to either SHUTDOWN or DONE
88 - DM_THREAD_SHUTDOWN: thread is still doing something, but it is
89 supposed to terminate (and transition to DONE) as soon as it
90 finishes whatever it was doing at the point of flipping state to
91 SHUTDOWN... the thread is still on the thread list
92 - DM_THREAD_DONE: thread has terminated and has been moved over to
93 unused thread list, cleanup pending
95 #define DM_THREAD_RUNNING 0
96 #define DM_THREAD_SHUTDOWN 1
97 #define DM_THREAD_DONE 2
99 #define THREAD_STACK_SIZE (300*1024)
101 #define DEBUGLOG(fmt, args...) _debuglog(fmt, ## args)
103 /* Data kept about a DSO. */
104 struct dso_data {
105 struct dm_list list;
107 char *dso_name; /* DSO name (eg, "evms", "dmraid", "lvm2"). */
109 void *dso_handle; /* Opaque handle as returned from dlopen(). */
110 unsigned int ref_count; /* Library reference count. */
113 * Event processing.
115 * The DSO can take whatever steps are appropriate when an event
116 * occurs, such as changing the mapping if a mirror fails or
117 * updating application metadata.
119 * This function gets a dm_task that is a result of
120 * DM_DEVICE_WAITEVENT ioctl (results equivalent to
121 * DM_DEVICE_STATUS). It should not destroy it.
122 * The caller must dispose of the task.
124 void (*process_event)(struct dm_task *dmt, enum dm_event_mask event, void **user);
127 * Device registration.
129 * When an application registers a device for an event, the DSO
130 * can carry out appropriate steps so that a later call to
131 * the process_event() function is sane (eg, read metadata
132 * and activate a mapping).
134 int (*register_device)(const char *device, const char *uuid, int major,
135 int minor, void **user);
138 * Device unregistration.
140 * In case all devices of a mapping (eg, RAID10) are unregistered
141 * for events, the DSO can recognize this and carry out appropriate
142 * steps (eg, deactivate mapping, metadata update).
144 int (*unregister_device)(const char *device, const char *uuid,
145 int major, int minor, void **user);
147 static DM_LIST_INIT(_dso_registry);
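/*
 * Illustrative sketch only (not part of this file): a monitoring DSO
 * provides the three symbols resolved by lookup_symbols() below, with the
 * signatures declared in struct dso_data above.  The return conventions and
 * the use of the "user" pointer here are assumptions based on how this
 * daemon checks the results, not a documented API contract.
 *
 *	void process_event(struct dm_task *dmt, enum dm_event_mask event,
 *			   void **user)
 *	{
 *		// Inspect dmt (DM_DEVICE_WAITEVENT/STATUS result) and react,
 *		// e.g. repair a failed mirror.  Do not destroy dmt here.
 *	}
 *
 *	int register_device(const char *device, const char *uuid, int major,
 *			    int minor, void **user)
 *	{
 *		*user = NULL;	// optional per-device private state
 *		return 1;	// non-zero appears to mean success
 *	}
 *
 *	int unregister_device(const char *device, const char *uuid, int major,
 *			      int minor, void **user)
 *	{
 *		return 1;
 *	}
 */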
149 /* Structure to keep parsed register variables from client message. */
150 struct message_data {
151 char *id;
152 char *dso_name; /* Name of DSO. */
153 char *device_uuid; /* Mapped device UUID. */
154 union {
155 char *str; /* Events string as fetched from message. */
156 enum dm_event_mask field; /* Events bitfield. */
157 } events;
158 union {
159 char *str;
160 uint32_t secs;
161 } timeout;
162 struct dm_event_daemon_message *msg; /* Pointer to message buffer. */
166 * Housekeeping of thread+device states.
168 * One thread per mapped device; it blocks on the device until an event
169 * occurs, then the event processing function of the DSO gets called.
171 struct thread_status {
172 struct dm_list list;
174 pthread_t thread;
176 struct dso_data *dso_data; /* DSO this thread accesses. */
178 struct {
179 char *uuid;
180 char *name;
181 int major, minor;
182 } device;
183 uint32_t event_nr; /* event number */
184 int processing; /* Set when event is being processed */
186 int status; /* see DM_THREAD_{RUNNING,SHUTDOWN,DONE}
187 constants above */
188 enum dm_event_mask events; /* bitfield for event filter. */
189 enum dm_event_mask current_events; /* bitfield for occurred events. */
190 struct dm_task *current_task;
191 time_t next_time;
192 uint32_t timeout;
193 struct dm_list timeout_list;
194 void *dso_private; /* dso per-thread status variable */
196 static DM_LIST_INIT(_thread_registry);
197 static DM_LIST_INIT(_thread_registry_unused);
199 static int _timeout_running;
200 static DM_LIST_INIT(_timeout_registry);
201 static pthread_mutex_t _timeout_mutex = PTHREAD_MUTEX_INITIALIZER;
202 static pthread_cond_t _timeout_cond = PTHREAD_COND_INITIALIZER;
204 static void _debuglog(const char *fmt, ...)
206 time_t P;
207 va_list ap;
209 if (!_debug)
210 return;
212 va_start(ap,fmt);
214 time(&P);
215 fprintf(stderr, "dmeventd[%p]: %.15s ", (void *) pthread_self(), ctime(&P)+4 );
216 vfprintf(stderr, fmt, ap);
217 fprintf(stderr, "\n");
219 va_end(ap);
222 /* Allocate/free the status structure for a monitoring thread. */
223 static struct thread_status *_alloc_thread_status(struct message_data *data,
224 struct dso_data *dso_data)
226 struct thread_status *ret = (typeof(ret)) dm_malloc(sizeof(*ret));
228 if (!ret)
229 return NULL;
231 memset(ret, 0, sizeof(*ret));
232 if (!(ret->device.uuid = dm_strdup(data->device_uuid))) {
233 dm_free(ret);
234 return NULL;
237 ret->current_task = NULL;
238 ret->device.name = NULL;
239 ret->device.major = ret->device.minor = 0;
240 ret->dso_data = dso_data;
241 ret->events = data->events.field;
242 ret->timeout = data->timeout.secs;
243 dm_list_init(&ret->timeout_list);
245 return ret;
248 static void _free_thread_status(struct thread_status *thread)
250 if (thread->current_task)
251 dm_task_destroy(thread->current_task);
252 dm_free(thread->device.uuid);
253 dm_free(thread->device.name);
254 dm_free(thread);
257 /* Allocate/free DSO data. */
258 static struct dso_data *_alloc_dso_data(struct message_data *data)
260 struct dso_data *ret = (typeof(ret)) dm_malloc(sizeof(*ret));
262 if (!ret)
263 return NULL;
265 memset(ret, 0, sizeof(*ret));
266 if (!(ret->dso_name = dm_strdup(data->dso_name))) {
267 dm_free(ret);
268 return NULL;
271 return ret;
274 /* Create a device monitoring thread. */
275 static int _pthread_create_smallstack(pthread_t *t, void *(*fun)(void *), void *arg)
277 pthread_attr_t attr;
278 pthread_attr_init(&attr);
280 * We use a smaller stack since it gets preallocated in its entirety
282 pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE);
283 return pthread_create(t, &attr, fun, arg);
286 static void _free_dso_data(struct dso_data *data)
288 dm_free(data->dso_name);
289 dm_free(data);
293 * Fetch a string off src and duplicate it into *ptr.
294 * Pay attention to zero-length strings.
296 /* FIXME? move to libdevmapper to share with the client lib (need to
297 make delimiter a parameter then) */
298 static int _fetch_string(char **ptr, char **src, const int delimiter)
300 int ret = 0;
301 char *p;
302 size_t len;
304 if ((p = strchr(*src, delimiter)))
305 *p = 0;
307 if ((*ptr = dm_strdup(*src))) {
308 if ((len = strlen(*ptr)))
309 *src += len;
310 else {
311 dm_free(*ptr);
312 *ptr = NULL;
315 (*src)++;
316 ret = 1;
319 if (p)
320 *p = delimiter;
322 return ret;
325 /* Free message memory. */
326 static void _free_message(struct message_data *message_data)
328 if (message_data->id)
329 dm_free(message_data->id);
330 if (message_data->dso_name)
331 dm_free(message_data->dso_name);
333 if (message_data->device_uuid)
334 dm_free(message_data->device_uuid);
338 /* Parse a register message from the client. */
339 static int _parse_message(struct message_data *message_data)
341 int ret = 0;
342 char *p = message_data->msg->data;
343 struct dm_event_daemon_message *msg = message_data->msg;
345 if (!msg->data)
346 return 0;
349 * Retrieve application identifier, DSO name, mapped device
350 * UUID, event mask and timeout strings from the message.
352 if (_fetch_string(&message_data->id, &p, ' ') &&
353 _fetch_string(&message_data->dso_name, &p, ' ') &&
354 _fetch_string(&message_data->device_uuid, &p, ' ') &&
355 _fetch_string(&message_data->events.str, &p, ' ') &&
356 _fetch_string(&message_data->timeout.str, &p, ' ')) {
357 if (message_data->events.str) {
358 enum dm_event_mask i = atoi(message_data->events.str);
361 * Free the string representation of the events.
362 * Not needed anymore.
364 dm_free(message_data->events.str);
365 message_data->events.field = i;
367 if (message_data->timeout.str) {
368 uint32_t secs = atoi(message_data->timeout.str);
369 dm_free(message_data->timeout.str);
370 message_data->timeout.secs = secs ? secs :
371 DM_EVENT_DEFAULT_TIMEOUT;
374 ret = 1;
377 dm_free(msg->data);
378 msg->data = NULL;
379 msg->size = 0;
380 return ret;
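/*
 * For illustration, the registration payload consumed by _parse_message()
 * above is a single space-delimited string of the form
 *
 *	"<id> <dso name> <device uuid> <event mask> <timeout seconds>"
 *
 * where the event mask and timeout are decimal numbers (parsed with atoi
 * above) and a timeout of 0 falls back to DM_EVENT_DEFAULT_TIMEOUT.  The
 * concrete field values a client sends come from libdevmapper-event and
 * are not shown in this file.
 */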
383 /* Global mutex to lock access to lists et al. See _global_mutex
384 above. */
385 static int _lock_mutex(void)
387 return pthread_mutex_lock(&_global_mutex);
390 static int _unlock_mutex(void)
392 return pthread_mutex_unlock(&_global_mutex);
395 /* Store pid in pidfile. */
396 static int _storepid(int lf)
398 int len;
399 char pid[8];
401 if ((len = snprintf(pid, sizeof(pid), "%u\n", getpid())) < 0)
402 return 0;
404 if (len > (int) sizeof(pid))
405 len = (int) sizeof(pid);
407 if (write(lf, pid, (size_t) len) != len)
408 return 0;
410 fsync(lf);
412 return 1;
415 /* Check if a device exists. */
416 static int _fill_device_data(struct thread_status *ts)
418 struct dm_task *dmt;
419 struct dm_info dmi;
421 if (!ts->device.uuid)
422 return 0;
424 ts->device.name = NULL;
425 ts->device.major = ts->device.minor = 0;
427 dmt = dm_task_create(DM_DEVICE_INFO);
428 if (!dmt)
429 return 0;
431 dm_task_set_uuid(dmt, ts->device.uuid);
432 if (!dm_task_run(dmt))
433 goto fail;
435 ts->device.name = dm_strdup(dm_task_get_name(dmt));
436 if (!ts->device.name)
437 goto fail;
439 if (!dm_task_get_info(dmt, &dmi))
440 goto fail;
442 ts->device.major = dmi.major;
443 ts->device.minor = dmi.minor;
445 dm_task_destroy(dmt);
446 return 1;
448 fail:
449 dm_task_destroy(dmt);
450 dm_free(ts->device.name);
451 return 0;
455 * Find an existing thread for a device.
457 * Mutex must be held when calling this.
459 static struct thread_status *_lookup_thread_status(struct message_data *data)
461 struct thread_status *thread;
463 dm_list_iterate_items(thread, &_thread_registry)
464 if (!strcmp(data->device_uuid, thread->device.uuid))
465 return thread;
467 return NULL;
470 /* Cleanup at exit. */
471 static void _exit_dm_lib(void)
473 dm_lib_release();
474 dm_lib_exit();
477 static void _exit_timeout(void *unused __attribute((unused)))
479 _timeout_running = 0;
480 pthread_mutex_unlock(&_timeout_mutex);
483 /* Wake up monitor threads every so often. */
484 static void *_timeout_thread(void *unused __attribute((unused)))
486 struct timespec timeout;
487 time_t curr_time;
489 timeout.tv_nsec = 0;
490 pthread_cleanup_push(_exit_timeout, NULL);
491 pthread_mutex_lock(&_timeout_mutex);
493 while (!dm_list_empty(&_timeout_registry)) {
494 struct thread_status *thread;
496 timeout.tv_sec = 0;
497 curr_time = time(NULL);
499 dm_list_iterate_items_gen(thread, &_timeout_registry, timeout_list) {
500 if (thread->next_time <= curr_time) {
501 thread->next_time = curr_time + thread->timeout;
502 pthread_kill(thread->thread, SIGALRM);
505 if (thread->next_time < timeout.tv_sec || !timeout.tv_sec)
506 timeout.tv_sec = thread->next_time;
509 pthread_cond_timedwait(&_timeout_cond, &_timeout_mutex,
510 &timeout);
513 pthread_cleanup_pop(1);
515 return NULL;
518 static int _register_for_timeout(struct thread_status *thread)
520 int ret = 0;
522 pthread_mutex_lock(&_timeout_mutex);
524 thread->next_time = time(NULL) + thread->timeout;
526 if (dm_list_empty(&thread->timeout_list)) {
527 dm_list_add(&_timeout_registry, &thread->timeout_list);
528 if (_timeout_running)
529 pthread_cond_signal(&_timeout_cond);
532 if (!_timeout_running) {
533 pthread_t timeout_id;
535 if (!(ret = -_pthread_create_smallstack(&timeout_id, _timeout_thread, NULL)))
536 _timeout_running = 1;
539 pthread_mutex_unlock(&_timeout_mutex);
541 return ret;
544 static void _unregister_for_timeout(struct thread_status *thread)
546 pthread_mutex_lock(&_timeout_mutex);
547 if (!dm_list_empty(&thread->timeout_list)) {
548 dm_list_del(&thread->timeout_list);
549 dm_list_init(&thread->timeout_list);
551 pthread_mutex_unlock(&_timeout_mutex);
554 static void _no_intr_log(int level, const char *file, int line,
555 const char *f, ...)
557 va_list ap;
559 if (errno == EINTR)
560 return;
561 if (level > _LOG_WARN)
562 return;
564 va_start(ap, f);
566 if (level < _LOG_WARN)
567 vfprintf(stderr, f, ap);
568 else
569 vprintf(f, ap);
571 va_end(ap);
573 if (level < _LOG_WARN)
574 fprintf(stderr, "\n");
575 else
576 fprintf(stdout, "\n");
579 static sigset_t _unblock_sigalrm(void)
581 sigset_t set, old;
583 sigemptyset(&set);
584 sigaddset(&set, SIGALRM);
585 pthread_sigmask(SIG_UNBLOCK, &set, &old);
586 return old;
589 #define DM_WAIT_RETRY 0
590 #define DM_WAIT_INTR 1
591 #define DM_WAIT_FATAL 2
593 /* Wait on a device until an event occurs. */
594 static int _event_wait(struct thread_status *thread, struct dm_task **task)
596 sigset_t set;
597 int ret = DM_WAIT_RETRY;
598 struct dm_task *dmt;
599 struct dm_info info;
601 *task = 0;
603 if (!(dmt = dm_task_create(DM_DEVICE_WAITEVENT)))
604 return DM_WAIT_RETRY;
606 thread->current_task = dmt;
608 if (!dm_task_set_uuid(dmt, thread->device.uuid) ||
609 !dm_task_set_event_nr(dmt, thread->event_nr))
610 goto out;
613 * This is so that you can break out of waiting on an event,
614 * either for a timeout event, or to cancel the thread.
616 set = _unblock_sigalrm();
617 dm_log_init(_no_intr_log);
618 errno = 0;
619 if (dm_task_run(dmt)) {
620 thread->current_events |= DM_EVENT_DEVICE_ERROR;
621 ret = DM_WAIT_INTR;
623 if ((ret = dm_task_get_info(dmt, &info)))
624 thread->event_nr = info.event_nr;
625 } else if (thread->events & DM_EVENT_TIMEOUT && errno == EINTR) {
626 thread->current_events |= DM_EVENT_TIMEOUT;
627 ret = DM_WAIT_INTR;
628 } else if (thread->status == DM_THREAD_SHUTDOWN && errno == EINTR) {
629 ret = DM_WAIT_FATAL;
630 } else {
631 syslog(LOG_NOTICE, "dm_task_run failed, errno = %d, %s",
632 errno, strerror(errno));
633 if (errno == ENXIO) {
634 syslog(LOG_ERR, "%s disappeared, detaching",
635 thread->device.name);
636 ret = DM_WAIT_FATAL;
640 pthread_sigmask(SIG_SETMASK, &set, NULL);
641 dm_log_init(NULL);
643 out:
644 if (ret == DM_WAIT_FATAL || ret == DM_WAIT_RETRY) {
645 dm_task_destroy(dmt);
646 thread->current_task = NULL;
647 } else
648 *task = dmt;
650 return ret;
653 /* Register a device with the DSO. */
654 static int _do_register_device(struct thread_status *thread)
656 return thread->dso_data->register_device(thread->device.name,
657 thread->device.uuid,
658 thread->device.major,
659 thread->device.minor,
660 &(thread->dso_private));
663 /* Unregister a device with the DSO. */
664 static int _do_unregister_device(struct thread_status *thread)
666 return thread->dso_data->unregister_device(thread->device.name,
667 thread->device.uuid,
668 thread->device.major,
669 thread->device.minor,
670 &(thread->dso_private));
673 /* Process an event in the DSO. */
674 static void _do_process_event(struct thread_status *thread, struct dm_task *task)
676 thread->dso_data->process_event(task, thread->current_events, &(thread->dso_private));
679 /* Thread cleanup handler to unregister device. */
680 static void _monitor_unregister(void *arg)
682 struct thread_status *thread = arg, *thread_iter;
684 if (!_do_unregister_device(thread))
685 syslog(LOG_ERR, "%s: %s unregister failed\n", __func__,
686 thread->device.name);
687 if (thread->current_task)
688 dm_task_destroy(thread->current_task);
689 thread->current_task = NULL;
691 _lock_mutex();
692 if (thread->events & DM_EVENT_TIMEOUT) {
693 /* _unregister_for_timeout locks another mutex, we
694 don't want to deadlock so we release our mutex for
695 a bit */
696 _unlock_mutex();
697 _unregister_for_timeout(thread);
698 _lock_mutex();
700 /* we may have been relinked to unused registry since we were
701 called, so check that */
702 dm_list_iterate_items(thread_iter, &_thread_registry_unused)
703 if (thread_iter == thread) {
704 thread->status = DM_THREAD_DONE;
705 _unlock_mutex();
706 return;
708 thread->status = DM_THREAD_DONE;
709 UNLINK_THREAD(thread);
710 LINK(thread, &_thread_registry_unused);
711 _unlock_mutex();
714 static struct dm_task *_get_device_status(struct thread_status *ts)
716 struct dm_task *dmt = dm_task_create(DM_DEVICE_STATUS);
718 if (!dmt)
719 return NULL;
721 dm_task_set_uuid(dmt, ts->device.uuid);
723 if (!dm_task_run(dmt)) {
724 dm_task_destroy(dmt);
725 return NULL;
728 return dmt;
731 /* Device monitoring thread. */
732 static void *_monitor_thread(void *arg)
734 struct thread_status *thread = arg;
735 int wait_error = 0;
736 struct dm_task *task;
738 pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
739 pthread_cleanup_push(_monitor_unregister, thread);
741 /* Wait for _do_process_request() to finish its task. */
742 _lock_mutex();
743 thread->status = DM_THREAD_RUNNING;
744 _unlock_mutex();
746 /* Loop forever awaiting/analyzing device events. */
747 while (1) {
748 thread->current_events = 0;
750 wait_error = _event_wait(thread, &task);
751 if (wait_error == DM_WAIT_RETRY)
752 continue;
754 if (wait_error == DM_WAIT_FATAL)
755 break;
757 /* A timeout occurred, so the task is not filled in properly.
758 * Get the device status here so the DSO can process it. */
760 if (wait_error == DM_WAIT_INTR &&
761 thread->current_events & DM_EVENT_TIMEOUT) {
762 dm_task_destroy(task);
763 task = _get_device_status(thread);
764 /* FIXME: syslog fail here ? */
765 if (!(thread->current_task = task))
766 continue;
770 * We know that wait succeeded and stored a
771 * pointer to dm_task with device status into task.
775 * Check against filter.
777 * If there are current events delivered from _event_wait() AND
778 * the device got registered for those events AND
779 * those events haven't been processed yet, call
780 * the DSO's process_event() handler.
782 _lock_mutex();
783 if (thread->status == DM_THREAD_SHUTDOWN) {
784 _unlock_mutex();
785 break;
787 _unlock_mutex();
789 if (thread->events & thread->current_events) {
790 _lock_mutex();
791 thread->processing = 1;
792 _unlock_mutex();
794 _do_process_event(thread, task);
795 dm_task_destroy(task);
796 thread->current_task = NULL;
798 _lock_mutex();
799 thread->processing = 0;
800 _unlock_mutex();
801 } else {
802 dm_task_destroy(task);
803 thread->current_task = NULL;
807 pthread_cleanup_pop(1);
809 return NULL;
812 /* Create a device monitoring thread. */
813 static int _create_thread(struct thread_status *thread)
815 return _pthread_create_smallstack(&thread->thread, _monitor_thread, thread);
818 static int _terminate_thread(struct thread_status *thread)
820 return pthread_kill(thread->thread, SIGALRM);
823 /* DSO reference counting. Call with _global_mutex locked! */
824 static void _lib_get(struct dso_data *data)
826 data->ref_count++;
829 static void _lib_put(struct dso_data *data)
831 if (!--data->ref_count) {
832 dlclose(data->dso_handle);
833 UNLINK_DSO(data);
834 _free_dso_data(data);
838 /* Find DSO data. */
839 static struct dso_data *_lookup_dso(struct message_data *data)
841 struct dso_data *dso_data, *ret = NULL;
843 dm_list_iterate_items(dso_data, &_dso_registry)
844 if (!strcmp(data->dso_name, dso_data->dso_name)) {
845 _lib_get(dso_data);
846 ret = dso_data;
847 break;
850 return ret;
854 /* Look up the DSO symbols we need. */
854 static int _lookup_symbol(void *dl, void **symbol, const char *name)
856 if ((*symbol = dlsym(dl, name)))
857 return 1;
859 return 0;
862 static int lookup_symbols(void *dl, struct dso_data *data)
864 return _lookup_symbol(dl, (void *) &data->process_event,
865 "process_event") &&
866 _lookup_symbol(dl, (void *) &data->register_device,
867 "register_device") &&
868 _lookup_symbol(dl, (void *) &data->unregister_device,
869 "unregister_device");
872 /* Load an application specific DSO. */
873 static struct dso_data *_load_dso(struct message_data *data)
875 void *dl;
876 struct dso_data *ret = NULL;
878 if (!(dl = dlopen(data->dso_name, RTLD_NOW))) {
879 const char *dlerr = dlerror();
880 syslog(LOG_ERR, "dmeventd %s dlopen failed: %s", data->dso_name,
881 dlerr);
882 data->msg->size =
883 dm_asprintf(&(data->msg->data), "%s %s dlopen failed: %s",
884 data->id, data->dso_name, dlerr);
885 return NULL;
888 if (!(ret = _alloc_dso_data(data))) {
889 dlclose(dl);
890 return NULL;
893 if (!(lookup_symbols(dl, ret))) {
894 _free_dso_data(ret);
895 dlclose(dl);
896 return NULL;
900 * Keep handle to close the library once
901 * we've got no references to it any more.
903 ret->dso_handle = dl;
904 _lib_get(ret);
906 _lock_mutex();
907 LINK_DSO(ret);
908 _unlock_mutex();
910 return ret;
913 /* Return success on daemon active check. */
914 static int _active(struct message_data *message_data)
916 return 0;
920 * Register for an event.
922 * Only one caller at a time here, because we use
923 * a FIFO and lock it against multiple accesses.
925 static int _register_for_event(struct message_data *message_data)
927 int ret = 0;
928 struct thread_status *thread, *thread_new = NULL;
929 struct dso_data *dso_data;
931 if (!(dso_data = _lookup_dso(message_data)) &&
932 !(dso_data = _load_dso(message_data))) {
933 stack;
934 #ifdef ELIBACC
935 ret = -ELIBACC;
936 #else
937 ret = -ENODEV;
938 #endif
939 goto out;
942 /* Preallocate thread status struct to avoid deadlock. */
943 if (!(thread_new = _alloc_thread_status(message_data, dso_data))) {
944 stack;
945 ret = -ENOMEM;
946 goto out;
949 if (!_fill_device_data(thread_new)) {
950 stack;
951 ret = -ENODEV;
952 goto out;
955 _lock_mutex();
957 /* If creation of the timeout thread fails (as it may), we fail
958 here completely. The client is responsible for either
959 retrying later or trying to register without timeout
960 events. However, if the timeout thread cannot be started, it
961 usually means we are so starved of resources that we are
962 almost as good as dead already... */
963 if (thread_new->events & DM_EVENT_TIMEOUT) {
964 ret = -_register_for_timeout(thread_new);
965 if (ret) {
966 _unlock_mutex();
967 goto out;
971 if (!(thread = _lookup_thread_status(message_data))) {
972 _unlock_mutex();
974 if (!(ret = _do_register_device(thread_new)))
975 goto out;
977 thread = thread_new;
978 thread_new = NULL;
980 /* Try to create the monitoring thread for this device. */
981 _lock_mutex();
982 if ((ret = -_create_thread(thread))) {
983 _unlock_mutex();
984 _do_unregister_device(thread);
985 _free_thread_status(thread);
986 goto out;
987 } else
988 LINK_THREAD(thread);
991 /* OR the event mask into the events bitfield. */
992 thread->events |= message_data->events.field;
994 _unlock_mutex();
996 out:
998 * Deallocate thread status after releasing
999 * the lock in case we haven't used it.
1001 if (thread_new)
1002 _free_thread_status(thread_new);
1004 return ret;
1008 * Unregister for an event.
1010 * Only one caller at a time here as with register_for_event().
1012 static int _unregister_for_event(struct message_data *message_data)
1014 int ret = 0;
1015 struct thread_status *thread;
1018 * Clear the event in the bitfield and deactivate the
1019 * monitoring thread if the bitfield becomes 0.
1021 _lock_mutex();
1023 if (!(thread = _lookup_thread_status(message_data))) {
1024 _unlock_mutex();
1025 ret = -ENODEV;
1026 goto out;
1029 if (thread->status == DM_THREAD_DONE) {
1030 /* the thread has terminated while we were not
1031 watching */
1032 _unlock_mutex();
1033 return 0;
1036 thread->events &= ~message_data->events.field;
1038 if (!(thread->events & DM_EVENT_TIMEOUT))
1039 _unregister_for_timeout(thread);
1041 * If there are no events left to monitor on this device,
1042 * unlink and terminate its monitoring thread.
1044 if (!thread->events) {
1045 UNLINK_THREAD(thread);
1046 LINK(thread, &_thread_registry_unused);
1048 _unlock_mutex();
1050 out:
1051 return ret;
1055 * Get registered device.
1057 * Only one caller at a time here as with register_for_event().
1059 static int _registered_device(struct message_data *message_data,
1060 struct thread_status *thread)
1062 struct dm_event_daemon_message *msg = message_data->msg;
1064 const char *fmt = "%s %s %s %u";
1065 const char *id = message_data->id;
1066 const char *dso = thread->dso_data->dso_name;
1067 const char *dev = thread->device.uuid;
1068 unsigned events = ((thread->status == DM_THREAD_RUNNING) && thread->events)
1069 ? thread->events
1070 : thread->events | DM_EVENT_REGISTRATION_PENDING;
1072 if (msg->data)
1073 dm_free(msg->data);
1075 msg->size = dm_asprintf(&(msg->data), fmt, id, dso, dev, events);
1077 _unlock_mutex();
1079 return 0;
1082 static int _want_registered_device(char *dso_name, char *device_uuid,
1083 struct thread_status *thread)
1085 /* If DSO names and device UUIDs are equal. */
1086 if (dso_name && device_uuid)
1087 return !strcmp(dso_name, thread->dso_data->dso_name) &&
1088 !strcmp(device_uuid, thread->device.uuid) &&
1089 (thread->status == DM_THREAD_RUNNING ||
1090 (thread->events & DM_EVENT_REGISTRATION_PENDING));
1092 /* If DSO names are equal. */
1093 if (dso_name)
1094 return !strcmp(dso_name, thread->dso_data->dso_name) &&
1095 (thread->status == DM_THREAD_RUNNING ||
1096 (thread->events & DM_EVENT_REGISTRATION_PENDING));
1098 /* If device UUIDs are equal. */
1099 if (device_uuid)
1100 return !strcmp(device_uuid, thread->device.uuid) &&
1101 (thread->status == DM_THREAD_RUNNING ||
1102 (thread->events & DM_EVENT_REGISTRATION_PENDING));
1104 return 1;
1107 static int _get_registered_dev(struct message_data *message_data, int next)
1109 struct thread_status *thread, *hit = NULL;
1111 _lock_mutex();
1113 /* Iterate list of threads checking if we want a particular one. */
1114 dm_list_iterate_items(thread, &_thread_registry)
1115 if (_want_registered_device(message_data->dso_name,
1116 message_data->device_uuid,
1117 thread)) {
1118 hit = thread;
1119 break;
1123 * If we got a registered device and do not want the next one,
1124 * return it; otherwise fetch the next conforming element off the list.
1126 if (hit && !next) {
1127 _unlock_mutex();
1128 return _registered_device(message_data, hit);
1131 if (!hit)
1132 goto out;
1134 thread = hit;
1136 while (1) {
1137 if (dm_list_end(&_thread_registry, &thread->list))
1138 goto out;
1140 thread = dm_list_item(thread->list.n, struct thread_status);
1141 if (_want_registered_device(message_data->dso_name, NULL, thread)) {
1142 hit = thread;
1143 break;
1147 _unlock_mutex();
1148 return _registered_device(message_data, hit);
1150 out:
1151 _unlock_mutex();
1153 return -ENOENT;
1156 static int _get_registered_device(struct message_data *message_data)
1158 return _get_registered_dev(message_data, 0);
1161 static int _get_next_registered_device(struct message_data *message_data)
1163 return _get_registered_dev(message_data, 1);
1166 static int _set_timeout(struct message_data *message_data)
1168 struct thread_status *thread;
1170 _lock_mutex();
1171 if ((thread = _lookup_thread_status(message_data)))
1172 thread->timeout = message_data->timeout.secs;
1173 _unlock_mutex();
1175 return thread ? 0 : -ENODEV;
1178 static int _get_timeout(struct message_data *message_data)
1180 struct thread_status *thread;
1181 struct dm_event_daemon_message *msg = message_data->msg;
1183 if (msg->data)
1184 dm_free(msg->data);
1186 _lock_mutex();
1187 if ((thread = _lookup_thread_status(message_data))) {
1188 msg->size =
1189 dm_asprintf(&(msg->data), "%s %" PRIu32, message_data->id,
1190 thread->timeout);
1191 } else {
1192 msg->data = NULL;
1193 msg->size = 0;
1195 _unlock_mutex();
1197 return thread ? 0 : -ENODEV;
1200 /* Initialize a fifos structure with path names. */
1201 static void _init_fifos(struct dm_event_fifos *fifos)
1203 memset(fifos, 0, sizeof(*fifos));
1205 fifos->client_path = DM_EVENT_FIFO_CLIENT;
1206 fifos->server_path = DM_EVENT_FIFO_SERVER;
1209 /* Open fifos used for client communication. */
1210 static int _open_fifos(struct dm_event_fifos *fifos)
1212 /* Create fifos */
1213 if (((mkfifo(fifos->client_path, 0600) == -1) && errno != EEXIST) ||
1214 ((mkfifo(fifos->server_path, 0600) == -1) && errno != EEXIST)) {
1215 syslog(LOG_ERR, "%s: Failed to create a fifo.\n", __func__);
1216 stack;
1217 return -errno;
1220 struct stat st;
1222 /* Warn about wrong permissions if applicable */
1223 if ((!stat(fifos->client_path, &st)) && (st.st_mode & 0777) != 0600)
1224 syslog(LOG_WARNING, "Fixing wrong permissions on %s",
1225 fifos->client_path);
1227 if ((!stat(fifos->server_path, &st)) && (st.st_mode & 0777) != 0600)
1228 syslog(LOG_WARNING, "Fixing wrong permissions on %s",
1229 fifos->server_path);
1231 /* If they were already there, make sure permissions are ok. */
1232 if (chmod(fifos->client_path, 0600)) {
1233 syslog(LOG_ERR, "Unable to set correct file permissions on %s",
1234 fifos->client_path);
1235 return -errno;
1238 if (chmod(fifos->server_path, 0600)) {
1239 syslog(LOG_ERR, "Unable to set correct file permissions on %s",
1240 fifos->server_path);
1241 return -errno;
1244 /* Need to open read+write or we will block or fail */
1245 if ((fifos->server = open(fifos->server_path, O_RDWR)) < 0) {
1246 stack;
1247 return -errno;
1250 /* Need to open read+write for select() to work. */
1251 if ((fifos->client = open(fifos->client_path, O_RDWR)) < 0) {
1252 stack;
1253 close(fifos->server);
1254 return -errno;
1257 return 0;
1261 * Read message from client making sure that data is available
1262 * and a complete message is read. Must not block indefinitely.
1264 static int _client_read(struct dm_event_fifos *fifos,
1265 struct dm_event_daemon_message *msg)
1267 struct timeval t;
1268 unsigned bytes = 0;
1269 int ret = 0;
1270 fd_set fds;
1271 int header = 1;
1272 size_t size = 2 * sizeof(uint32_t); /* status + size */
1273 char *buf = alloca(size);
1275 msg->data = NULL;
1277 errno = 0;
1278 while (bytes < size && errno != EOF) {
1279 /* Watch client read FIFO for input. */
1280 FD_ZERO(&fds);
1281 FD_SET(fifos->client, &fds);
1282 t.tv_sec = 1;
1283 t.tv_usec = 0;
1284 ret = select(fifos->client + 1, &fds, NULL, NULL, &t);
1286 if (!ret && !bytes) /* nothing to read */
1287 return 0;
1289 if (!ret) /* trying to finish read */
1290 continue;
1292 if (ret < 0) /* error */
1293 return 0;
1295 ret = read(fifos->client, buf + bytes, size - bytes);
1296 bytes += ret > 0 ? ret : 0;
1297 if (bytes == 2 * sizeof(uint32_t) && header) {
1298 msg->cmd = ntohl(*((uint32_t *) buf));
1299 msg->size = ntohl(*((uint32_t *) buf + 1));
1300 buf = msg->data = dm_malloc(msg->size);
1301 size = msg->size;
1302 bytes = 0;
1303 header = 0;
1307 if (bytes != size) {
1308 if (msg->data)
1309 dm_free(msg->data);
1310 msg->data = NULL;
1311 msg->size = 0;
1314 return bytes == size;
1318 * Write a message to the client making sure that it is ready to write.
1320 static int _client_write(struct dm_event_fifos *fifos,
1321 struct dm_event_daemon_message *msg)
1323 unsigned bytes = 0;
1324 int ret = 0;
1325 fd_set fds;
1327 size_t size = 2 * sizeof(uint32_t) + msg->size;
1328 char *buf = alloca(size);
1330 *((uint32_t *)buf) = htonl(msg->cmd);
1331 *((uint32_t *)buf + 1) = htonl(msg->size);
1332 if (msg->data)
1333 memcpy(buf + 2 * sizeof(uint32_t), msg->data, msg->size);
1335 errno = 0;
1336 while (bytes < size && errno != EIO) {
1337 do {
1338 /* Watch client write FIFO to be ready for output. */
1339 FD_ZERO(&fds);
1340 FD_SET(fifos->server, &fds);
1341 } while (select(fifos->server + 1, NULL, &fds, NULL, NULL) != 1);
1344 ret = write(fifos->server, buf + bytes, size - bytes);
1345 bytes += ret > 0 ? ret : 0;
1348 return bytes == size;
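/*
 * Sketch of the framing used by _client_read()/_client_write() above: every
 * message is a fixed header of two 32-bit words in network byte order (cmd,
 * then payload size) followed by "size" bytes of payload.  A client writes
 * its request to the client FIFO and reads the reply from the server FIFO,
 * framing it roughly like this (illustrative only, error handling omitted):
 *
 *	uint32_t hdr[2];
 *	hdr[0] = htonl(msg.cmd);
 *	hdr[1] = htonl(msg.size);
 *	write(fd, hdr, sizeof(hdr));
 *	if (msg.size)
 *		write(fd, msg.data, msg.size);
 */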
1352 * Handle a client request.
1354 * We put the request handling functions into
1355 * a list because of the growing number.
1357 static int _handle_request(struct dm_event_daemon_message *msg,
1358 struct message_data *message_data)
1360 static struct {
1361 unsigned int cmd;
1362 int (*f)(struct message_data *);
1363 } requests[] = {
1364 { DM_EVENT_CMD_REGISTER_FOR_EVENT, _register_for_event},
1365 { DM_EVENT_CMD_UNREGISTER_FOR_EVENT, _unregister_for_event},
1366 { DM_EVENT_CMD_GET_REGISTERED_DEVICE, _get_registered_device},
1367 { DM_EVENT_CMD_GET_NEXT_REGISTERED_DEVICE,
1368 _get_next_registered_device},
1369 { DM_EVENT_CMD_SET_TIMEOUT, _set_timeout},
1370 { DM_EVENT_CMD_GET_TIMEOUT, _get_timeout},
1371 { DM_EVENT_CMD_ACTIVE, _active},
1372 }, *req;
1374 for (req = requests; req < requests + sizeof(requests) / sizeof(requests[0]); req++)
1375 if (req->cmd == msg->cmd)
1376 return req->f(message_data);
1378 return -EINVAL;
1381 /* Process a request passed from the communication thread. */
1382 static int _do_process_request(struct dm_event_daemon_message *msg)
1384 int ret;
1385 char *answer;
1386 static struct message_data message_data;
1388 /* Parse the message. */
1389 memset(&message_data, 0, sizeof(message_data));
1390 message_data.msg = msg;
1391 if (msg->cmd == DM_EVENT_CMD_HELLO) {
1392 ret = 0;
1393 answer = msg->data;
1394 if (answer) {
1395 msg->size = dm_asprintf(&(msg->data), "%s HELLO", answer);
1396 dm_free(answer);
1397 } else {
1398 msg->size = 0;
1399 msg->data = NULL;
1401 } else if (msg->cmd != DM_EVENT_CMD_ACTIVE && !_parse_message(&message_data)) {
1402 stack;
1403 ret = -EINVAL;
1404 } else
1405 ret = _handle_request(msg, &message_data);
1407 msg->cmd = ret;
1408 if (!msg->data)
1409 msg->size = dm_asprintf(&(msg->data), "%s %s", message_data.id, strerror(-ret));
1411 _free_message(&message_data);
1413 return ret;
1416 /* Only one caller at a time. */
1417 static void _process_request(struct dm_event_fifos *fifos)
1419 struct dm_event_daemon_message msg;
1421 memset(&msg, 0, sizeof(msg));
1424 * Read the request from the client (client_read, client_write
1425 * give true on success and false on failure).
1427 if (!_client_read(fifos, &msg))
1428 return;
1430 /* _do_process_request fills in msg (if memory allows for
1431 data, otherwise just cmd and size = 0) */
1432 _do_process_request(&msg);
1434 if (!_client_write(fifos, &msg))
1435 stack;
1437 if (msg.data)
1438 dm_free(msg.data);
1441 static void _cleanup_unused_threads(void)
1443 int ret;
1444 struct dm_list *l;
1445 struct thread_status *thread;
1447 _lock_mutex();
1448 while ((l = dm_list_first(&_thread_registry_unused))) {
1449 thread = dm_list_item(l, struct thread_status);
1450 if (thread->processing)
1451 break; /* cleanup on the next round */
1453 if (thread->status == DM_THREAD_RUNNING) {
1454 thread->status = DM_THREAD_SHUTDOWN;
1455 break;
1458 if (thread->status == DM_THREAD_SHUTDOWN) {
1459 if (!thread->events) {
1460 /* turn codes negative -- should we be returning this? */
1461 ret = _terminate_thread(thread);
1463 if (ret == ESRCH) {
1464 thread->status = DM_THREAD_DONE;
1465 } else if (ret) {
1466 syslog(LOG_ERR,
1467 "Unable to terminate thread: %s\n",
1468 strerror(ret));
1469 stack;
1471 break;
1474 dm_list_del(l);
1475 syslog(LOG_ERR,
1476 "thread can't be on unused list unless !thread->events");
1477 thread->status = DM_THREAD_RUNNING;
1478 LINK_THREAD(thread);
1480 continue;
1483 if (thread->status == DM_THREAD_DONE) {
1484 dm_list_del(l);
1485 pthread_join(thread->thread, NULL);
1486 _lib_put(thread->dso_data);
1487 _free_thread_status(thread);
1491 _unlock_mutex();
1494 static void _sig_alarm(int signum __attribute((unused)))
1496 pthread_testcancel();
1499 /* Init thread signal handling. */
1500 static void _init_thread_signals(void)
1502 sigset_t my_sigset;
1503 struct sigaction act;
1505 memset(&act, 0, sizeof(act));
1506 act.sa_handler = _sig_alarm;
1507 sigaction(SIGALRM, &act, NULL);
1508 sigfillset(&my_sigset);
1510 /* These are used for exiting */
1511 sigdelset(&my_sigset, SIGTERM);
1512 sigdelset(&my_sigset, SIGINT);
1513 sigdelset(&my_sigset, SIGHUP);
1514 sigdelset(&my_sigset, SIGQUIT);
1516 pthread_sigmask(SIG_BLOCK, &my_sigset, NULL);
1520 * exit_handler
1521 * @sig
1523 * Set the global variable which the process should
1524 * be watching to determine when to exit.
1526 static void _exit_handler(int sig __attribute((unused)))
1529 * We exit when '_exit_now' is set.
1530 * That is, when a signal has been received.
1532 * We can not simply set '_exit_now' unless all
1533 * threads are done processing.
1535 if (!_thread_registries_empty) {
1536 syslog(LOG_ERR, "There are still devices being monitored.");
1537 syslog(LOG_ERR, "Refusing to exit.");
1538 } else
1539 _exit_now = 1;
1543 static int _lock_pidfile(void)
1545 int lf;
1546 char pidfile[] = DMEVENTD_PIDFILE;
1548 if ((lf = open(pidfile, O_CREAT | O_RDWR, 0644)) < 0)
1549 exit(EXIT_OPEN_PID_FAILURE);
1551 if (flock(lf, LOCK_EX | LOCK_NB) < 0)
1552 exit(EXIT_LOCKFILE_INUSE);
1554 if (!_storepid(lf))
1555 exit(EXIT_FAILURE);
1557 return 0;
1560 #ifdef linux
1562 * Protection against OOM killer if kernel supports it
1564 static int _set_oom_adj(int val)
1566 FILE *fp;
1568 struct stat st;
1570 if (stat(OOM_ADJ_FILE, &st) == -1) {
1571 if (errno == ENOENT)
1572 DEBUGLOG(OOM_ADJ_FILE " not found");
1573 else
1574 perror(OOM_ADJ_FILE ": stat failed");
1575 return 1;
1578 if (!(fp = fopen(OOM_ADJ_FILE, "w"))) {
1579 perror(OOM_ADJ_FILE ": fopen failed");
1580 return 0;
1583 fprintf(fp, "%i", val);
1584 if (dm_fclose(fp))
1585 perror(OOM_ADJ_FILE ": fclose failed");
1587 return 1;
1589 #endif
1591 static void _daemonize(void)
1593 int child_status;
1594 int fd;
1595 pid_t pid;
1596 struct rlimit rlim;
1597 struct timeval tval;
1598 sigset_t my_sigset;
1600 sigemptyset(&my_sigset);
1601 if (sigprocmask(SIG_SETMASK, &my_sigset, NULL) < 0) {
1602 fprintf(stderr, "Unable to restore signals.\n");
1603 exit(EXIT_FAILURE);
1605 signal(SIGTERM, &_exit_handler);
1607 switch (pid = fork()) {
1608 case -1:
1609 perror("fork failed:");
1610 exit(EXIT_FAILURE);
1612 case 0: /* Child */
1613 break;
1615 default:
1616 /* Wait for response from child */
1617 while (!waitpid(pid, &child_status, WNOHANG) && !_exit_now) {
1618 tval.tv_sec = 0;
1619 tval.tv_usec = 250000; /* .25 sec */
1620 select(0, NULL, NULL, NULL, &tval);
1623 if (_exit_now) /* Child has signaled it is ok - we can exit now */
1624 exit(EXIT_SUCCESS);
1626 /* Problem with child. Determine what it is by exit code */
1627 switch (WEXITSTATUS(child_status)) {
1628 case EXIT_LOCKFILE_INUSE:
1629 fprintf(stderr, "Another dmeventd daemon is already running\n");
1630 break;
1631 case EXIT_DESC_CLOSE_FAILURE:
1632 case EXIT_DESC_OPEN_FAILURE:
1633 case EXIT_OPEN_PID_FAILURE:
1634 case EXIT_FIFO_FAILURE:
1635 case EXIT_CHDIR_FAILURE:
1636 default:
1637 fprintf(stderr, "Child exited with code %d\n", WEXITSTATUS(child_status));
1638 break;
1641 exit(WEXITSTATUS(child_status));
1644 if (chdir("/"))
1645 exit(EXIT_CHDIR_FAILURE);
1647 if (getrlimit(RLIMIT_NOFILE, &rlim) < 0)
1648 fd = 256; /* just have to guess */
1649 else
1650 fd = rlim.rlim_cur;
1652 for (--fd; fd >= 0; fd--)
1653 close(fd);
1655 if ((open("/dev/null", O_RDONLY) < 0) ||
1656 (open("/dev/null", O_WRONLY) < 0) ||
1657 (open("/dev/null", O_WRONLY) < 0))
1658 exit(EXIT_DESC_OPEN_FAILURE);
1660 setsid();
1663 static void usage(char *prog, FILE *file)
1665 fprintf(file, "Usage:\n");
1666 fprintf(file, "%s [Vhd]\n", prog);
1667 fprintf(file, "\n");
1668 fprintf(file, " -V Show version of dmeventd\n");
1669 fprintf(file, " -h Show this help information\n");
1670 fprintf(file, " -d Don't fork, run in the foreground\n");
1671 fprintf(file, "\n");
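/*
 * Typical invocations (based on the options handled in main() below):
 *
 *	dmeventd	# daemonize and wait for client requests on the FIFOs
 *	dmeventd -d	# stay in the foreground with debug logging to stderr
 *	dmeventd -V	# print the version and exit
 */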
1674 int main(int argc, char *argv[])
1676 int ret;
1677 signed char opt;
1678 struct dm_event_fifos fifos;
1679 //struct sys_log logdata = {DAEMON_NAME, LOG_DAEMON};
1681 opterr = 0;
1682 optind = 0;
1684 while ((opt = getopt(argc, argv, "?hVd")) != EOF) {
1685 switch (opt) {
1686 case 'h':
1687 usage(argv[0], stdout);
1688 exit(0);
1689 case '?':
1690 usage(argv[0], stderr);
1691 exit(0);
1692 case 'd':
1693 _debug++;
1694 break;
1695 case 'V':
1696 printf("dmeventd version: %s\n", DM_LIB_VERSION);
1697 exit(1);
1698 break;
1702 if (!_debug)
1703 _daemonize();
1705 openlog("dmeventd", LOG_PID, LOG_DAEMON);
1707 _lock_pidfile(); /* exits if failure */
1709 /* Set the rest of the signals to cause '_exit_now' to be set */
1710 signal(SIGINT, &_exit_handler);
1711 signal(SIGHUP, &_exit_handler);
1712 signal(SIGQUIT, &_exit_handler);
1714 #ifdef linux
1715 if (!_set_oom_adj(OOM_DISABLE) && !_set_oom_adj(OOM_ADJUST_MIN))
1716 syslog(LOG_ERR, "Failed to set oom_adj to protect against OOM killer");
1717 #endif
1719 _init_thread_signals();
1721 //multilog_clear_logging();
1722 //multilog_add_type(std_syslog, &logdata);
1723 //multilog_init_verbose(std_syslog, _LOG_DEBUG);
1724 //multilog_async(1);
1726 _init_fifos(&fifos);
1728 pthread_mutex_init(&_global_mutex, NULL);
1730 #ifdef MCL_CURRENT
1731 if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
1732 exit(EXIT_FAILURE);
1733 #endif
1735 if ((ret = _open_fifos(&fifos)))
1736 exit(EXIT_FIFO_FAILURE);
1738 /* Signal parent, letting them know we are ready to go. */
1739 kill(getppid(), SIGTERM);
1740 syslog(LOG_NOTICE, "dmeventd ready for processing.");
1742 while (!_exit_now) {
1743 _process_request(&fifos);
1744 _cleanup_unused_threads();
1745 if (!dm_list_empty(&_thread_registry)
1746 || !dm_list_empty(&_thread_registry_unused))
1747 _thread_registries_empty = 0;
1748 else
1749 _thread_registries_empty = 1;
1752 _exit_dm_lib();
1754 #ifdef MCL_CURRENT
1755 munlockall();
1756 #endif
1757 pthread_mutex_destroy(&_global_mutex);
1759 syslog(LOG_NOTICE, "dmeventd shutting down.");
1760 closelog();
1762 exit(EXIT_SUCCESS);