Merge branch 'upstream'
[nativeclient.git] / tools / libsrpc / accept_loop.c
blobbc8ce97d7356bf7b9d5ba41ffcf0a05e98f49449
1 /* @@rewrite(insert c-copyright) */
3 #include <stdlib.h>
4 #include <unistd.h>
5 #include "nacl_srpc.h"
6 #include "nacl_srpc_internal.h"
7 #include <sys/nacl_syscalls.h>
/* Descriptor number of the pre-bound listening socket handed to the module. */
#define BOUND_SOCKET 3

/*
 * Provided by the startup code; returns -1 when not running embedded in
 * the browser.  Proper C prototype: "(void)" rather than the K&R-style
 * empty parameter list, which declares an unspecified argument list.
 */
extern int __srpc_get_fd(void);

/*
 * Per-thread flag: non-zero only in the worker serving the first (plugin)
 * connection, which is allowed to shut the whole application down.  The
 * initial value 1 covers the main thread before any worker is spawned.
 */
static __thread int srpc_privileged = 1;
/*
 * Per-connection bookkeeping passed (heap-allocated) to a worker thread,
 * which takes ownership of both the struct and the descriptor it holds.
 */
struct worker_state {
  int d;              /* connected client socket descriptor */
  int is_privileged;  /* non-zero for the first (browser plugin) connection */
};
20 /**
21 * Handle a shutdown request from the user command channel. We should
22 * allow user code to override this to do exit processing -- for now,
23 * user code will have to use onexit or atexit to do cleanup, which is
24 * suboptimal in a multithreaded environment.
26 static int srpc_shutdown_request(NaClSrpcChannel* channel_instance_data,
27 NaClSrpcArg **in_args,
28 NaClSrpcArg **out_arg) {
29 if (srpc_privileged) {
31 * do onexit/atexit processing, then exit_group. really should do
32 * something sane wrt threads, but we do not implement thread
33 * cancellation (as yet?).
35 exit(0);
37 return NACL_SRPC_RESULT_OK;
40 NACL_SRPC_METHOD("__shutdown::", srpc_shutdown_request);
42 /**
43 * Basic SRPC worker thread: set the TLS variable srpc_privileged from
44 * worker_state argument, then run the NaClSrpcServerLoop on it.
46 static void *srpc_worker(void *arg) {
47 struct worker_state *state = (struct worker_state *) arg;
49 srpc_privileged = state->is_privileged;
50 NaClSrpcServerLoop(state->d, __kNaClSrpcHandlers, NULL);
52 (void) close(state->d);
53 if (srpc_privileged) {
54 _exit(0);
56 free(arg);
57 return 0;
60 /**
61 * Acceptor loop: accept client connections, and for each, spawn a
62 * worker thread that invokes NaClSrpcDefaultServerLoop.
64 static void *srpc_default_acceptor(void *arg) {
65 int first = (int) arg;
66 int d;
68 while (-1 != (d = imc_accept(BOUND_SOCKET))) {
69 struct worker_state *state = malloc(sizeof *state);
70 pthread_t worker_tid;
72 if (NULL == state) {
74 * shed load; the client can come back later when we have more
75 * memory.
77 (void) close(d);
78 continue;
80 state->d = d;
81 state->is_privileged = first;
82 /* worker thread is responsible for state and d. */
83 pthread_create(&worker_tid, NULL, srpc_worker, state);
84 first = 0;
86 return NULL;
/* Protects multimedia_done and pairs with cv for the srpc_init handshake. */
static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
/* Signaled (broadcast) once the plugin has provided the multimedia handles. */
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
/* Set to 1 exactly once, under mu, by srpc_multimedia_done(). */
static int multimedia_done = 0;
93 void srpc_multimedia_done() {
94 pthread_mutex_lock(&mu);
95 multimedia_done = 1;
96 pthread_cond_broadcast(&cv);
97 pthread_mutex_unlock(&mu);
101 * Internal SRPC initialization.
103 * We check to see if we are running embedded in the browser, and if
104 * so, do nacl_multimedia bridge first (which blocks until the plugin
105 * provides us with the shared memory handles). Then, we spawn a
106 * thread which is our main accept loop, allowing the main thread to
107 * continue to run and perform graphics operations, possibly also
108 * getting processing requests from clients via an event queue of some
109 * kind.
111 * The accept loop spawns worker threads on a per-client-connection
112 * basis. (We could use a thread pool, but do not at this point.)
113 * The worker threads just handle RPC requests using
114 * NaClSrpcDefaultServerLoop(). The first worker thread is
115 * "privileged", in that it is responsible for shutting down the NaCl
116 * app, and we expect that this first connection comes from the
117 * browser plugin.
119 void srpc_init() {
120 pthread_t acceptor_tid;
121 int is_embedded;
123 is_embedded = (__srpc_get_fd() != -1);
124 if (is_embedded) {
125 pthread_create(&acceptor_tid, NULL,
126 srpc_default_acceptor, (void *) 1);
127 pthread_detach(acceptor_tid);
129 pthread_mutex_lock(&mu);
130 while (!multimedia_done) {
131 pthread_cond_wait(&cv, &mu);
134 pthread_mutex_unlock(&mu);