/*
 * This file implements a generic process event publish/subscribe facility.
 * The facility is for use by non-core system services that implement part of
 * the userland system call interface.  Currently, it supports two events: a
 * process catching a signal, and a process being terminated.  A subscribing
 * service would typically use such events to interrupt a blocking system call
 * and/or clean up process-bound resources.  As of writing, the only service
 * that uses this facility is the System V IPC server.
 *
 * Each of these events will be published to subscribing services right after
 * VFS has acknowledged that it has processed the same event.  For each
 * subscriber, in turn, the process will be blocked (with the EVENT_CALL flag
 * set) until the subscriber acknowledges the event or PM learns that the
 * subscriber has died.  Thus, each subscriber adds a serialized messaging
 * roundtrip for each subscribed event.
 *
 * The one and only reason for this synchronous, serialized approach is that it
 * avoids PM queuing up too many asynchronous messages.  In theory, each
 * running process may have an event pending, and thus, the serial synchronous
 * approach requires NR_PROCS asynsend slots.  For a parallel synchronous
 * approach, this would increase to (NR_PROCS*NR_SUBS).  Worse yet, for an
 * asynchronous event notification approach, the number of messages that PM
 * can end up queuing is potentially unbounded, so that is certainly not an
 * option.  At this moment, we expect only one subscriber (the IPC server),
 * which makes the serial vs parallel point less relevant.
 *
 * It is not possible to subscribe to events from certain processes only.  If
 * a service were to subscribe to process events as part of a system call by
 * a process (e.g., semop(2) in the case of the IPC server), it may subscribe
 * "too late" and already have missed a signal event for the process calling
 * semop(2), for example.  Resolving such race conditions would require major
 * infrastructure changes.
 *
 * A service may however change its event subscription mask at runtime, so as
 * to limit the number of event messages it receives in a crude fashion.  For
 * the same race-condition reasons, new subscriptions must always be made when
 * processing a message that is *not* a system call potentially affected by
 * events.  In the case of the IPC server, it may subscribe to events from
 * semget(2) but not semop(2).  For signal events, the delay call system
 * guarantees the safety of this approach; for exit events, the message type
 * prioritization does (which is not great; see the TODO item in forkexit.c).
 *
 * After changing its mask, a subscribing service may still receive messages
 * for events it is no longer subscribed to.  It should acknowledge these
 * messages by sending a reply as usual.
 */
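
/*
 * Illustrative sketch only (not part of PM): roughly how a subscribing
 * service, such as the IPC server, might use this facility.  The subscription
 * wrapper proceventmask(), the PROC_EVENT_REPLY acknowledgment type, and the
 * helper clean_up_endpt() are assumptions made for the sake of the example;
 * consult the subscriber's actual source for the authoritative version.
 *
 *	// From a "safe" context (service initialization, or a call such as
 *	// semget(2) that is not itself affected by events), subscribe:
 *	if ((r = proceventmask(PROC_EVENT_EXIT | PROC_EVENT_SIGNAL)) != OK)
 *		panic("unable to subscribe to process events: %d", r);
 *
 *	// In the main message loop, acknowledge each PROC_EVENT from PM by
 *	// echoing the message back after doing any local cleanup:
 *	if (m.m_source == PM_PROC_NR && m.m_type == PROC_EVENT) {
 *		clean_up_endpt(m.m_pm_lsys_proc_event.endpt);
 *
 *		m.m_type = PROC_EVENT_REPLY;
 *		if ((r = asynsend3(PM_PROC_NR, &m, AMF_NOREPLY)) != OK)
 *			printf("service: failed to reply to PM (%d)\n", r);
 *	}
 */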

#include "pm.h"
#include "mproc.h"
#include <assert.h>

/*
 * A realistic upper bound for the number of subscribing services.  The process
 * event notification system adds a round trip to a service for each subscriber
 * and uses asynchronous messaging to boot, so clearly it does not scale to
 * numbers larger than this.
 */
#define NR_SUBS		4

static struct {
	endpoint_t endpt;	/* endpoint of subscriber */
	unsigned int mask;	/* interests bit mask (PROC_EVENT_) */
	unsigned int waiting;	/* # procs blocked on reply from it */
} subs[NR_SUBS];

static unsigned int nsubs = 0;
static unsigned int nested = 0;

/*
 * For the current event of the given process, as determined by its flags, send
 * a process event message to the next subscriber, or resume handling the
 * event itself if there are no more subscribers to notify.
 */
static void
resume_event(struct mproc * rmp)
{
	message m;
	unsigned int i, event;
	int r;

	assert(rmp->mp_flags & IN_USE);
	assert(rmp->mp_flags & EVENT_CALL);
	assert(rmp->mp_eventsub != NO_EVENTSUB);

	/* Which event should we be concerned about? */
	if (rmp->mp_flags & EXITING)
		event = PROC_EVENT_EXIT;
	else if (rmp->mp_flags & UNPAUSED)
		event = PROC_EVENT_SIGNAL;
	else
		panic("unknown event for flags %x", rmp->mp_flags);

	/*
	 * If there are additional services interested in this event, send a
	 * message to the next one.
	 */
	for (i = rmp->mp_eventsub; i < nsubs; i++, rmp->mp_eventsub++) {
		if (subs[i].mask & event) {
			memset(&m, 0, sizeof(m));
			m.m_type = PROC_EVENT;
			m.m_pm_lsys_proc_event.endpt = rmp->mp_endpoint;
			m.m_pm_lsys_proc_event.event = event;

			r = asynsend3(subs[i].endpt, &m, AMF_NOREPLY);
			if (r != OK)
				panic("asynsend failed: %d", r);

			assert(subs[i].waiting < NR_PROCS);
			subs[i].waiting++;

			return;
		}
	}

	/* No more subscribers to be notified, resume the actual event. */
	rmp->mp_flags &= ~EVENT_CALL;
	rmp->mp_eventsub = NO_EVENTSUB;

	if (event == PROC_EVENT_EXIT)
		exit_restart(rmp);
	else if (event == PROC_EVENT_SIGNAL)
		restart_sigs(rmp);
}

/*
 * Remove a subscriber from the set, forcefully if we have to.  Ensure that
 * any processes currently subject to process event notification are updated
 * accordingly, in a way that no services are skipped for process events.
 */
static void
remove_sub(unsigned int slot)
{
	struct mproc *rmp;
	unsigned int i;

	/* The loop below needs the remaining items to be kept in order. */
	for (i = slot; i < nsubs - 1; i++)
		subs[i] = subs[i + 1];
	nsubs--;

	/* Adjust affected processes' event subscriber indexes to match. */
	for (rmp = &mproc[0]; rmp < &mproc[NR_PROCS]; rmp++) {
		if ((rmp->mp_flags & (IN_USE | EVENT_CALL)) !=
		    (IN_USE | EVENT_CALL))
			continue;
		assert(rmp->mp_eventsub != NO_EVENTSUB);

		/*
		 * While resuming a process could trigger new events, event
		 * calls always take place after the corresponding VFS calls,
		 * making this nesting-safe.  Check anyway, because if nesting
		 * does occur, we are in serious (un-debuggable) trouble.
		 */
		if ((unsigned int)rmp->mp_eventsub == slot) {
			nested++;
			resume_event(rmp);
			nested--;
		} else if ((unsigned int)rmp->mp_eventsub > slot)
			rmp->mp_eventsub--;
	}
}

/*
 * Subscribe to process events.  The given event mask denotes the events in
 * which the caller is interested.  Multiple calls will each replace the mask,
 * and a mask of zero will unsubscribe the service from events altogether.
 * Return OK on success, EPERM if the caller may not register for events, or
 * ENOMEM if all subscriber slots are in use already.
 */
int
do_proceventmask(void)
{
	unsigned int i, mask;

	/* This call is for system services only. */
	if (!(mp->mp_flags & PRIV_PROC))
		return EPERM;

	mask = m_in.m_lsys_pm_proceventmask.mask;

	/*
	 * First check if we need to update or remove an existing entry.
	 * We cannot actually remove services for which we are still waiting
	 * for a reply, so set their mask to zero for later removal instead.
	 */
	for (i = 0; i < nsubs; i++) {
		if (subs[i].endpt == who_e) {
			if (mask == 0 && subs[i].waiting == 0)
				remove_sub(i);
			else
				subs[i].mask = mask;
			return OK;
		}
	}

	/* Add a new entry, unless the given mask is empty. */
	if (mask == 0)
		return OK;

	/* This case should never trigger. */
	if (nsubs == __arraycount(subs)) {
		printf("PM: too many process event subscribers!\n");
		return ENOMEM;
	}

	subs[nsubs].endpt = who_e;
	subs[nsubs].mask = mask;
	nsubs++;

	return OK;
}
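
/*
 * For illustration only (not PM code): the message layout a privileged
 * service would use to reach the handler above.  The PM_PROCEVENTMASK call
 * number and the _taskcall() transport are assumptions here; in practice a
 * libsys wrapper would hide this.
 *
 *	message m;
 *	int r;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.m_lsys_pm_proceventmask.mask = PROC_EVENT_EXIT | PROC_EVENT_SIGNAL;
 *	r = _taskcall(PM_PROC_NR, PM_PROCEVENTMASK, &m);
 */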

/*
 * A subscribing service has replied to a process event message from us, or at
 * least that is what should have happened.  First make sure of this, and then
 * resume event handling for the affected process.
 */
int
do_proc_event_reply(void)
{
	struct mproc *rmp;
	endpoint_t endpt;
	unsigned int i, event;
	int slot;

	assert(nested == 0);

	/*
	 * Is this an accidental call from a misguided user process?
	 * Politely tell it to go away.
	 */
	if (!(mp->mp_flags & PRIV_PROC))
		return ENOSYS;

	/*
	 * Ensure that we got the reply that we want.  Since this code is
	 * relatively new, produce lots of warnings for cases that should never
	 * or rarely occur.  Later we can just ignore all mismatching replies.
	 */
	endpt = m_in.m_pm_lsys_proc_event.endpt;
	if (pm_isokendpt(endpt, &slot) != OK) {
		printf("PM: proc event reply from %d for invalid endpt %d\n",
		    who_e, endpt);
		return SUSPEND;
	}
	rmp = &mproc[slot];
	if (!(rmp->mp_flags & EVENT_CALL)) {
		printf("PM: proc event reply from %d for endpt %d, no event\n",
		    who_e, endpt);
		return SUSPEND;
	}
	if (rmp->mp_eventsub == NO_EVENTSUB ||
	    (unsigned int)rmp->mp_eventsub >= nsubs) {
		printf("PM: proc event reply from %d for endpt %d index %d\n",
		    who_e, endpt, rmp->mp_eventsub);
		return SUSPEND;
	}
	i = rmp->mp_eventsub;
	if (subs[i].endpt != who_e) {
		printf("PM: proc event reply for %d from %d instead of %d\n",
		    endpt, who_e, subs[i].endpt);
		return SUSPEND;
	}
	if (rmp->mp_flags & EXITING)
		event = PROC_EVENT_EXIT;
	else if (rmp->mp_flags & UNPAUSED)
		event = PROC_EVENT_SIGNAL;
	else {
		printf("PM: proc event reply from %d for %d, bad flags %x\n",
		    who_e, endpt, rmp->mp_flags);
		return SUSPEND;
	}
	if (m_in.m_pm_lsys_proc_event.event != event) {
		printf("PM: proc event reply from %d for %d for event %d "
		    "instead of %d\n", who_e, endpt,
		    m_in.m_pm_lsys_proc_event.event, event);
		return SUSPEND;
	}

	/*
	 * Do NOT check the event against the subscriber's event mask, since a
	 * service may have unsubscribed from an event while it has yet to
	 * process some leftover notifications for that event.  We could decide
	 * not to wait for the replies to those leftover notifications upon
	 * unsubscription, but that could result in problems upon quick
	 * resubscription, and such cases may in fact happen in practice.
	 */

	assert(subs[i].waiting > 0);
	subs[i].waiting--;

	/*
	 * If we are now no longer waiting for any replies from an already
	 * unsubscribed (but alive) service, remove it from the set now; this
	 * will also resume events for the current process.  In the normal case
	 * however, let the current process move on to the next subscriber if
	 * there are more, and the actual event otherwise.
	 */
	if (subs[i].mask == 0 && subs[i].waiting == 0) {
		remove_sub(i);
	} else {
		rmp->mp_eventsub++;

		resume_event(rmp);
	}

	/* In any case, do not reply to this reply message. */
	return SUSPEND;
}

/*
 * Publish a process event to interested subscribers.  The event is determined
 * from the process flags.  In addition, if the event is a process exit, also
 * check if it is a subscribing service that died.
 */
void
publish_event(struct mproc * rmp)
{
	unsigned int i;

	assert(nested == 0);
	assert((rmp->mp_flags & (IN_USE | EVENT_CALL)) == IN_USE);
	assert(rmp->mp_eventsub == NO_EVENTSUB);

	/*
	 * If a system service exited, we have to check if it was subscribed
	 * to process events.  If so, we have to remove it from the set and
	 * resume any processes blocked on an event call to that service.
	 */
	if ((rmp->mp_flags & (PRIV_PROC | EXITING)) == (PRIV_PROC | EXITING)) {
		for (i = 0; i < nsubs; i++) {
			if (subs[i].endpt == rmp->mp_endpoint) {
				/*
				 * If the wait count is nonzero, we may or may
				 * not get additional replies from this
				 * service later.  Those will be ignored.
				 */
				remove_sub(i);

				break;
			}
		}
	}

	/*
	 * Either send an event message to the first subscriber, or if there
	 * are no subscribers, resume processing the event right away.
	 */
	rmp->mp_flags |= EVENT_CALL;
	rmp->mp_eventsub = 0;

	resume_event(rmp);
}