nsprpub/pr/tests/sel_spd.c
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is the Netscape Portable Runtime (NSPR).
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998-2000
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

/*
 * Test the speed of select within NSPR
 */

#include "nspr.h"
#include "prpriv.h"

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

#ifdef XP_MAC
#include "prlog.h"
int fprintf(FILE *stream, const char *fmt, ...)
{
    PR_LogPrint(fmt);
    return 0;
}
#define printf PR_LogPrint
extern void SetupMacPrintfLog(char *logFile);
#endif

#define PORT_BASE 19000

/*
 * One timing slot per worker thread; each d_* field accumulates elapsed
 * microseconds for the corresponding phase across all iterations.
 */
typedef struct timer_slot_t {
    unsigned long d_connect;
    unsigned long d_cl_data;
    unsigned long d_sv_data;
    unsigned long d_close;
    unsigned long d_total;
    unsigned long requests;
} timer_slot_t;

static long _iterations = 5;
static long _client_data = 8192;

#if defined(XP_MAC)
/*
 * The Mac does not scale well, especially with the requirement for thread
 * stack space and buffer allocation space.  It is easy to end up with
 * fragmented memory and be unable to allocate a thread stack or a
 * client/server data buffer.
 */
static long _server_data = (8*1024);
static long _threads_max = 10, _threads = 10;
#else
static long _server_data = (128*1024);
static long _threads_max = 10, _threads = 10;
#endif

static int verbose=0;
static PRMonitor *exit_cv;
static long _thread_exit_count;
static timer_slot_t *timer_data;
static PRThreadScope scope1, scope2;

void tally_results(int);

/* return the diff in microseconds */
unsigned long _delta(PRIntervalTime *start, PRIntervalTime *stop)
{
    /*
     * Will C do the right thing with unsigned arithmetic?
     */
    return PR_IntervalToMicroseconds(*stop - *start);
}
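
/*
 * Read exactly len bytes from sock into buf, looping over PR_Recv until the
 * full amount has arrived.  Returns len on success, or -1 if the peer closes
 * the connection or an error occurs.
 */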
int _readn(PRFileDesc *sock, char *buf, int len)
{
    int rem;
    int bytes;

    for (rem=len; rem; rem -= bytes) {
        bytes = PR_Recv(sock, buf+len-rem, rem, 0, PR_INTERVAL_NO_TIMEOUT);
        if (bytes <= 0)
            return -1;
    }
    return len;
}
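
/*
 * Called by each worker thread just before it exits: decrement the shared
 * exit count under the exit_cv monitor and notify the waiting main thread
 * once the last worker has finished.
 */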
void
_thread_exit(int id)
{
    PR_EnterMonitor(exit_cv);
#ifdef DEBUG
    fprintf(stdout, "Thread %d EXIT\n", id);
#endif

    _thread_exit_count--;
    if (_thread_exit_count == 0) {
#ifdef DEBUG
        fprintf(stdout, "Thread %d EXIT triggered notify\n", id);
#endif
        PR_Notify(exit_cv);
    }
    PR_ExitMonitor(exit_cv);
}
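
/*
 * Server side of the benchmark.  Each server thread listens on its own port
 * (PORT_BASE + id), spawns the matching client thread, and then for each
 * iteration accepts a connection, reads the client data, writes the server
 * data back, and records the elapsed time of each phase in its timer slot.
 */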
void
_server_thread(void *arg_id)
{
    void _client_thread(void *);
    PRThread *thread;
    int *id = (int *)arg_id;
    PRFileDesc *sock = NULL;    /* initialized so the cleanup path is safe */
    PRSocketOptionData sockopt;
    PRNetAddr sa;
    PRFileDesc *newsock;
    char *data_buffer = NULL;
    int data_buffer_size;
    int index;
    PRIntervalTime start,
                   connect_done,
                   read_done,
                   write_done,
                   close_done;

#ifdef DEBUG
    fprintf(stdout, "server thread %d alive\n", *id);
#endif

    data_buffer_size = (_client_data>_server_data?_client_data:_server_data);

    if ( (data_buffer = (char *)PR_Malloc(data_buffer_size * sizeof(char))) == NULL ) {
        fprintf(stderr, "Error creating buffer in server thread %d\n", *id);
        goto done;
    }

    if ( (sock = PR_NewTCPSocket()) == NULL) {
        fprintf(stderr, "Error creating socket in server thread %d\n", *id);
        goto done;
    }

    sockopt.option = PR_SockOpt_Reuseaddr;
    sockopt.value.reuse_addr = PR_TRUE;
    if ( PR_SetSocketOption(sock, &sockopt) == PR_FAILURE) {
        fprintf(stderr, "Error setting socket option in server thread %d\n", *id);
        goto done;
    }

    memset(&sa, 0 , sizeof(sa));
    sa.inet.family = PR_AF_INET;
    sa.inet.port = PR_htons(PORT_BASE + *id);
    sa.inet.ip = PR_htonl(PR_INADDR_ANY);

    if ( PR_Bind(sock, &sa) < 0) {
        fprintf(stderr, "Error binding socket in server thread %d errno = %d\n", *id, errno);
        goto done;
    }

    if ( PR_Listen(sock, 32) < 0 ) {
        fprintf(stderr, "Error listening to socket in server thread %d\n", *id);
        goto done;
    }

    /* Tell the client to start */
    if ( (thread = PR_CreateThread(PR_USER_THREAD,
                                   _client_thread,
                                   id,
                                   PR_PRIORITY_NORMAL,
                                   scope2,
                                   PR_UNJOINABLE_THREAD,
                                   0)) == NULL)
        fprintf(stderr, "Error creating client thread %d\n", *id);

    for (index = 0; index< _iterations; index++) {

#ifdef DEBUG
        fprintf(stdout, "server thread %d loop %d\n", *id, index);
#endif

        start = PR_IntervalNow();

        if ( (newsock = PR_Accept(sock, &sa,
                                  PR_INTERVAL_NO_TIMEOUT)) == NULL) {
            fprintf(stderr, "Error accepting connection %d in server thread %d\n",
                    index, *id);
            goto done;
        }
#ifdef DEBUG
        fprintf(stdout, "server thread %d got connection %d\n", *id, newsock);
#endif

        connect_done = PR_IntervalNow();

        if ( _readn(newsock, data_buffer, _client_data) < _client_data) {
            fprintf(stderr, "Error reading client data for iteration %d in server thread %d\n", index, *id );
            goto done;
        }

#ifdef DEBUG
        fprintf(stdout, "server thread %d read %d bytes\n", *id, _client_data);
#endif
        read_done = PR_IntervalNow();

        if ( PR_Send(newsock, data_buffer, _server_data, 0,
                     PR_INTERVAL_NO_TIMEOUT) < _server_data) {
            fprintf(stderr, "Error sending client data for iteration %d in server thread %d\n", index, *id );
            goto done;
        }

#ifdef DEBUG
        fprintf(stdout, "server thread %d write %d bytes\n", *id, _server_data);
#endif

        write_done = PR_IntervalNow();

        PR_Close(newsock);

        close_done = PR_IntervalNow();

        timer_data[2*(*id)].d_connect += _delta(&start, &connect_done);
        timer_data[2*(*id)].d_cl_data += _delta(&connect_done, &read_done);
        timer_data[2*(*id)].d_sv_data += _delta(&read_done, &write_done);
        timer_data[2*(*id)].d_close += _delta(&write_done, &close_done);
        timer_data[2*(*id)].d_total += _delta(&start, &close_done);
        timer_data[2*(*id)].requests++;

#ifdef DEBUG
        fprintf(stdout, "server: %d %d %d %d %d\n",
                _delta(&start, &connect_done), _delta(&connect_done, &read_done),
                _delta(&read_done, &write_done), _delta(&write_done, &close_done),
                _delta(&start, &close_done));
#endif
    }

done:
    if (data_buffer != NULL) PR_Free (data_buffer);
    if (sock) PR_Close(sock);
    _thread_exit(*id);
    return;
}
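
/*
 * Client side of the benchmark.  For each iteration the client connects to
 * its server's loopback port, sends _client_data bytes, reads _server_data
 * bytes back, closes the socket, and records the per-phase timings in the
 * slot following its server's.
 */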
void
_client_thread(void *arg_id)
{
    int *id = (int *)arg_id;
    int index;
    PRNetAddr sa;
    PRFileDesc *sock_h;
    char *data_buffer = NULL;
    int data_buffer_size;
    int bytes;
    PRIntervalTime start,
                   connect_done,
                   read_done,
                   write_done,
                   close_done;
    PRStatus rv;

#ifdef DEBUG
    fprintf(stdout, "client thread %d alive\n", *id);
#endif

    data_buffer_size = (_client_data>_server_data?_client_data:_server_data);

    if ( (data_buffer = (char *)PR_Malloc(data_buffer_size * sizeof(char))) == NULL) {
        fprintf(stderr, "Error creating buffer in client thread %d\n", *id);
        goto done;
    }

    memset(&sa, 0 , sizeof(sa));
    rv = PR_InitializeNetAddr(PR_IpAddrLoopback, PORT_BASE + *id, &sa);
    PR_ASSERT(PR_SUCCESS == rv);

    for (index = 0; index< _iterations; index++) {

#ifdef DEBUG
        fprintf(stdout, "client thread %d loop %d\n", *id, index);
#endif

        start = PR_IntervalNow();
        if ( (sock_h = PR_NewTCPSocket()) == NULL) {
            fprintf(stderr, "Error creating socket %d in client thread %d\n",
                    index, *id);
            goto done;
        }

#ifdef DEBUG
        fprintf(stdout, "client thread %d socket created %d\n", *id, sock_h);
#endif

        if ( PR_Connect(sock_h, &sa,
                        PR_INTERVAL_NO_TIMEOUT) < 0) {
            fprintf(stderr, "Error making connection %d in client thread %d\n",
                    index, *id);
            goto done;
        }

#ifdef DEBUG
        fprintf(stdout, "client thread %d socket connected %d\n", *id, sock_h);
#endif

        connect_done = PR_IntervalNow();
        if ( PR_Send(sock_h, data_buffer, _client_data, 0,
                     PR_INTERVAL_NO_TIMEOUT) < _client_data) {
            fprintf(stderr, "Error sending client data for iteration %d in client thread %d\n", index, *id );
            goto done;
        }

#ifdef DEBUG
        fprintf(stdout, "client thread %d socket wrote %d\n", *id, _client_data);
#endif

        write_done = PR_IntervalNow();
        if ( (bytes = _readn(sock_h, data_buffer, _server_data)) < _server_data) {
            fprintf(stderr, "Error reading server data for iteration %d in client thread %d (read %d bytes)\n", index, *id, bytes );
            goto done;
        }

#ifdef DEBUG
        fprintf(stdout, "client thread %d socket read %d\n", *id, _server_data);
#endif

        read_done = PR_IntervalNow();
        PR_Close(sock_h);
        close_done = PR_IntervalNow();

        timer_data[2*(*id)+1].d_connect += _delta(&start, &connect_done);
        timer_data[2*(*id)+1].d_cl_data += _delta(&connect_done, &write_done);
        timer_data[2*(*id)+1].d_sv_data += _delta(&write_done, &read_done);
        timer_data[2*(*id)+1].d_close += _delta(&read_done, &close_done);
        timer_data[2*(*id)+1].d_total += _delta(&start, &close_done);
        timer_data[2*(*id)+1].requests++;
    }

done:
    if (data_buffer != NULL) PR_Free (data_buffer);
    _thread_exit(*id);

    return;
}
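
/*
 * Launch _threads server threads (each of which starts its own client),
 * block on the exit_cv monitor until all 2 * _threads workers have called
 * _thread_exit, then print the accumulated results.
 */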
static
void do_work(void)
{
    int index;

    _thread_exit_count = _threads * 2;
    for (index=0; index<_threads; index++) {
        PRThread *thread;
        int *id = (int *)PR_Malloc(sizeof(int));

        *id = index;

        if ( (thread = PR_CreateThread(PR_USER_THREAD,
                                       _server_thread,
                                       id,
                                       PR_PRIORITY_NORMAL,
                                       scope1,
                                       PR_UNJOINABLE_THREAD,
                                       0)) == NULL)
            fprintf(stderr, "Error creating server thread %d\n", index);
    }

    PR_EnterMonitor(exit_cv);
    while (_thread_exit_count > 0)
        PR_Wait(exit_cv, PR_INTERVAL_NO_TIMEOUT);
    PR_ExitMonitor(exit_cv);

    fprintf(stdout, "TEST COMPLETE!\n");

    tally_results(verbose);
}
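
/*
 * The four wrappers below run the same workload with every combination of
 * local (user-level) and global (kernel-level) thread scopes for the server
 * (scope1) and client (scope2) threads.
 */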
static void do_workUU(void)
{
    scope1 = PR_LOCAL_THREAD;
    scope2 = PR_LOCAL_THREAD;
    do_work();
}

static void do_workUK(void)
{
    scope1 = PR_LOCAL_THREAD;
    scope2 = PR_GLOBAL_THREAD;
    do_work();
}

static void do_workKU(void)
{
    scope1 = PR_GLOBAL_THREAD;
    scope2 = PR_LOCAL_THREAD;
    do_work();
}

static void do_workKK(void)
{
    scope1 = PR_GLOBAL_THREAD;
    scope2 = PR_GLOBAL_THREAD;
    do_work();
}
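
/*
 * Run func and report the mean wall-clock time per iteration in
 * microseconds.
 */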
static void Measure(void (*func)(void), const char *msg)
{
    PRIntervalTime start, stop;
    double d;

    start = PR_IntervalNow();
    (*func)();
    stop = PR_IntervalNow();

    d = (double)PR_IntervalToMicroseconds(stop - start);

    printf("%40s: %6.2f usec\n", msg, d / _iterations);
}
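
/*
 * Command-line options (Unix/OS2 only):
 *   -i n  iterations per thread pair      -t n  number of thread pairs
 *   -c n  client bytes per request        -s n  server bytes per reply
 *   -v    verbose per-thread results
 */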
int main(int argc, char **argv)
{
#if defined(XP_UNIX) || defined(XP_OS2_EMX)
    int opt;
    PR_IMPORT_DATA(char *) optarg;
#endif

#if defined(XP_UNIX) || defined(XP_OS2_EMX)
    while ( (opt = getopt(argc, argv, "c:s:i:t:v")) != EOF) {
        switch(opt) {
            case 'i':
                _iterations = atoi(optarg);
                break;
            case 't':
                _threads_max = _threads = atoi(optarg);
                break;
            case 'c':
                _client_data = atoi(optarg);
                break;
            case 's':
                _server_data = atoi(optarg);
                break;
            case 'v':
                verbose = 1;
                break;
            default:
                break;
        }
    }
#endif

    PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
    PR_STDIO_INIT();

#ifdef XP_MAC
    SetupMacPrintfLog("sel_spd.log");
#endif

    fprintf(stdout, "Running test for %ld iterations with %ld simultaneous threads.\n",
            _iterations, _threads);
    fprintf(stdout, "\tWill send %ld bytes of client data and %ld bytes of server data\n",
            _client_data, _server_data);

    if ( (exit_cv = PR_NewMonitor()) == NULL)
        fprintf(stderr, "Error creating monitor for exit cv\n");
    if ( (timer_data = (timer_slot_t *)PR_Malloc(2*_threads * sizeof(timer_slot_t))) == NULL)
        fprintf(stderr, "error allocating thread time results array\n");
    memset(timer_data, 0 , 2*_threads*sizeof(timer_slot_t));

    Measure(do_workUU, "select loop user/user");
    Measure(do_workUK, "select loop user/kernel");
    Measure(do_workKU, "select loop kernel/user");
    Measure(do_workKK, "select loop kernel/kernel");

    return 0;
}
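
/*
 * Print the accumulated timings.  Server threads occupy the even slots of
 * timer_data and client threads the odd slots; each total below is averaged
 * across the _threads workers.
 */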
void
tally_results(int verbose)
{
    int index;
    unsigned long tot_connect = 0;
    unsigned long tot_cl_data = 0;
    unsigned long tot_sv_data = 0;
    unsigned long tot_close = 0;
    unsigned long tot_all = 0;
    unsigned long tot_requests = 0;

    fprintf(stdout, "Server results:\n\n");
    for (index=0; index<_threads_max*2; index+=2) {

        if (verbose)
            fprintf(stdout, "server thread %d\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\n",
                    index, timer_data[index].requests, timer_data[index].d_connect,
                    timer_data[index].d_cl_data, timer_data[index].d_sv_data,
                    timer_data[index].d_close, timer_data[index].d_total);

        tot_connect += timer_data[index].d_connect / _threads;
        tot_cl_data += timer_data[index].d_cl_data / _threads;
        tot_sv_data += timer_data[index].d_sv_data / _threads;
        tot_close += timer_data[index].d_close / _threads;
        tot_all += timer_data[index].d_total / _threads;
        tot_requests += timer_data[index].requests / _threads;
    }

    fprintf(stdout, "----------\n");
    fprintf(stdout, "server per thread totals %lu\t%lu\t%lu\t%lu\t%lu\n",
            tot_requests, tot_connect, tot_cl_data, tot_sv_data, tot_close);
    fprintf(stdout, "server per thread elapsed time %lu\n", tot_all);
    fprintf(stdout, "----------\n");

    tot_connect = tot_cl_data = tot_sv_data = tot_close = tot_all = tot_requests = 0;
    fprintf(stdout, "Client results:\n\n");
    for (index=1; index<_threads_max*2; index+=2) {

        if (verbose)
            fprintf(stdout, "client thread %d\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\n",
                    index, timer_data[index].requests, timer_data[index].d_connect,
                    timer_data[index].d_cl_data, timer_data[index].d_sv_data,
                    timer_data[index].d_close, timer_data[index].d_total);

        tot_connect += timer_data[index].d_connect / _threads;
        tot_cl_data += timer_data[index].d_cl_data / _threads;
        tot_sv_data += timer_data[index].d_sv_data / _threads;
        tot_close += timer_data[index].d_close / _threads;
        tot_all += timer_data[index].d_total / _threads;
        tot_requests += timer_data[index].requests / _threads;
    }
    fprintf(stdout, "----------\n");
    fprintf(stdout, "client per thread totals %lu\t%lu\t%lu\t%lu\t%lu\n",
            tot_requests, tot_connect, tot_cl_data, tot_sv_data, tot_close);
    fprintf(stdout, "client per thread elapsed time %lu\n", tot_all);
}