src/mops_threads.c
/*
 * Mausezahn - A fast versatile traffic generator
 * Copyright (C) 2008-2010 Herbert Haas
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see http://www.gnu.org/licenses/gpl-2.0.html
 */
#include "mz.h"
#include "mops.h"
#include "cli.h"
#include "llist.h"

void mops_set_active (struct mops *mp)
{
    pthread_mutex_lock (& (mp->mops_mutex) );
    mp->state = MOPS_STATE_ACTIVE;
    pthread_mutex_unlock (& (mp->mops_mutex) );
}

void mops_set_seqact (struct mops *mp)
{
    pthread_mutex_lock (& (mp->mops_mutex) );
    mp->state = MOPS_STATE_SEQACT;
    pthread_mutex_unlock (& (mp->mops_mutex) );
}

void mops_set_conf (struct mops *mp)
{
    pthread_mutex_lock (& (mp->mops_mutex) );
    mp->state = MOPS_STATE_CONFIG;
    pthread_mutex_unlock (& (mp->mops_mutex) );
}

int mops_is_active (struct mops *mp)
{
    int i=0;

    pthread_mutex_lock (& (mp->mops_mutex) );
    if (mp->state == MOPS_STATE_ACTIVE) i=1;
    pthread_mutex_unlock (& (mp->mops_mutex) );
    return i;
}

// Returns 1 if the packet is in any running state
// such as MOPS_STATE_ACTIVE or MOPS_STATE_SEQACT
int mops_is_any_active (struct mops *mp)
{
    int i=0;

    pthread_mutex_lock (& (mp->mops_mutex) );
    if (mp->state > MOPS_STATE_CONFIG) i=1;
    pthread_mutex_unlock (& (mp->mops_mutex) );
    return i;
}

int mops_is_seqact (struct mops *mp)
{
    int i=0;

    pthread_mutex_lock (& (mp->mops_mutex) );
    if (mp->state == MOPS_STATE_SEQACT) i=1;
    pthread_mutex_unlock (& (mp->mops_mutex) );
    return i;
}

// return mops state (0=MOPS_STATE_NULL, 1=MOPS_STATE_INIT, 2=MOPS_STATE_CONFIG, 3=MOPS_STATE_ACTIVE, 4=MOPS_STATE_SEQACT)
int mops_state (struct mops *mp)
{
    int i=0;

    pthread_mutex_lock (& (mp->mops_mutex) );
    i = mp->state;
    pthread_mutex_unlock (& (mp->mops_mutex) );
    return i;
}
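
// Usage sketch (illustration only, not a function of this file): callers that
// want to modify a packet are expected to check its state first, e.g.
//
//      if (mops_is_any_active(mp)) {
//              // a TX thread may read mp concurrently -- do not edit
//      } else {
//              // packet is not being transmitted -- safe to reconfigure
//      }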

// Start transmission of this packet (or its interval thread, if configured).
// RETURNS: 0 upon success, 1 if the thread could not be created,
//          3 if the packet is already active.
int mops_tx_simple (struct mops *mp)
{
    if (mops_is_active(mp)) {
        return 3;
    }

    if (mp->interval_used) { // interval-based transmission
        if ( pthread_create( &(mp->interval_thread), NULL, mops_interval_thread, mp) ) {
            mp->interval_used=1; // 1 means interval only configured
            return 1; // Error creating thread
        }
    } else // normal packet train
        if ( pthread_create( &(mp->mops_thread), NULL, mops_tx_thread_native, mp) ) {
            return 1; // Error creating thread
        }

    return 0;
}
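
// Usage sketch (illustration only): a typical caller configures the packet,
// optionally sets mp->count, mp->ndelay and mp->interval, then calls
// mops_tx_simple(mp) and checks its return value; a running transmission is
// stopped again via mops_destroy_thread(mp) further below.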

// Starts a packet sequence.
//
// RETURN VALUES: 0 upon success
//                1 failure: packet not in CONFIG state
//                2 failure: packet has infinite count
//                3 failure: cannot create sequence thread
int mops_tx_sequence (struct mz_ll *seq)
{
    struct pseq *cur;
    int i;

    // verify 1) that all packets are in config state
    //        2) and have finite count:
    cur = (struct pseq*) seq->data;
    for (i=0; i<cur->count; i++) {
        if (cur->packet[i]->state!=MOPS_STATE_CONFIG) return 1;
        if (cur->packet[i]->count==0) return 2;
    }

    // Set all packets in this sequence into state SEQACT:
    for (i=0; i<cur->count; i++)
        mops_set_seqact (cur->packet[i]);

    if ( pthread_create( &(seq->sequence_thread), NULL, mops_sequence_thread, seq) ) {
        return 3; // Error creating thread
    }
    seq->state=1;
    return 0;
}
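
// Note on the data layout used above and in mops_sequence_thread() below:
// seq->data points to a struct pseq holding 'count' packet pointers in
// packet[] plus an optional inter-packet gap (a struct timespec) in gap[];
// gap[i] is applied after packet[i] has been transmitted.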

// This is the sequence sending thread
void *mops_sequence_thread (void *arg)
{
    struct mz_ll *seq = (struct mz_ll*) arg;
    struct pseq *cur;
    int i;

    cur = (struct pseq*) seq->data;

    // Send one packet after the other, possibly with gaps in between:
    for (i=0; i<cur->count; i++) {
        mops_tx_thread_native (cur->packet[i]);
        // if gap exists...
        if ((cur->gap[i].tv_sec) || (cur->gap[i].tv_nsec)) {
            nanosleep(&cur->gap[i], NULL); //...apply it.
        }
    }

    // Finally:
    // 1) reset all packets into config state
    for (i=0; i<cur->count; i++)
        cur->packet[i]->state=MOPS_STATE_CONFIG;
    // 2) set sequence state to inactive (=0) -- do this before pthread_exit()
    seq->state=0;
    // 3) join to main
    pthread_exit(NULL);

    return NULL;
}

// This is the interval management thread which starts
// packet transmission threads by itself.
//
// Note how this works: After the while statement below we have actually
// two threads, mops_tx_thread_native (sending the packet) and mops_interval_thread which
// starts mops_tx_thread_native every mp->interval. If mp->interval is smaller than
// mp->delay (and mp->count > 1) then multiple transmission threads will be active at the
// same time which is usually not what the user wants. We do not catch this case here
// but the user interface should do that (it is done in 'cmd_packet_interval').
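//
// Example (illustration only): with interval = 1 s, count = 10 and delay = 10 ms,
// a new transmission thread is started every second and sends a burst of ten
// packets spaced 10 ms apart, i.e. each burst lasts roughly 100 ms. If the
// interval were shorter than such a burst, the bursts would overlap.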

void *mops_interval_thread (void *arg)
{
    struct mops *mp = (struct mops*) arg;

    mp->interval_used=2; // 2 means active interval
    while (1) {
        if ( pthread_create( &(mp->mops_thread), NULL, mops_tx_thread_native, mp) ) {
            mp->interval_used=1;
            pthread_exit(NULL);
        }
        nanosleep(&mp->interval, NULL);
    }

    pthread_exit(NULL); // hmm...does this make sense?
    return NULL;
}
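
// Summary of the mp->interval_used values as used in this file (convention
// derived from the call sites above and in mops_destroy_thread below):
//   0 ... no interval configured (mops_tx_simple starts a plain packet train)
//   1 ... interval configured, but no interval thread currently running
//   2 ... interval thread currently active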

// General MOPS sending thread using packet sockets.
//
void *mops_tx_thread_native (void *arg)
{
    struct mops *mp = (struct mops*) arg;
    struct mops_ext_rtp * pd;
    int ps, i, n=0;
    u_int8_t DA[4];
    // Local vars are faster --------------------------
    struct timespec tv;
    register int infinity, devind;
    int ip_src_isrange  = mp->use_IP & mp->ip_src_isrange;
    int ip_dst_isrange  = mp->use_IP & mp->ip_dst_isrange;
    int sp_isrange      = (mp->use_UDP | mp->use_TCP) & mp->sp_isrange;
    int dp_isrange      = (mp->use_UDP | mp->use_TCP) & mp->dp_isrange;
    int ip_src_israndom = mp->use_IP & mp->ip_src_israndom;
    int sp_isrand       = (mp->use_UDP | mp->use_TCP) & mp->sp_isrand;
    int dp_isrand       = (mp->use_UDP | mp->use_TCP) & mp->dp_isrand;

    u_int32_t
        ip_src_start  = mp->ip_src_start,
        ip_src_stop   = mp->ip_src_stop,
        ip_dst_start  = mp->ip_dst_start,
        ip_dst_stop   = mp->ip_dst_stop,
        tcp_seq_delta = mp->tcp_seq_delta,
        tcp_seq_range = 0,
        tcp_ack_delta = mp->tcp_ack_delta,
        tcp_ack_range = 0,
        tcp_ack_count = 0,
        tcp_seq_count = 0;

    int
        sp_start = mp->sp_start,
        dp_start = mp->dp_start,
        sp_stop  = mp->sp_stop,
        dp_stop  = mp->dp_stop;

    int
        rtp_mode = 0; // RTP not used

    int
        fragsize = 0,
        frag_overlap = 0,
        fragptr = 0,
        offset = 0,
        offset_delta = 0,
        begin_ip_payload = 0,
        ip_payload_s = 0,
        original_msg_s = 0,
        whats_used = 0; // store use_UDP or use_TCP here to clean up packet parameters finally

    char
        original_msg[MAX_MOPS_MSG_SIZE+1], // temporary buffer when fragmentation is needed
        ip_payload[MAX_MOPS_MSG_SIZE+1];   // temporary buffer when fragmentation is needed

    // -------------------------------------------------

    /////////////////////////////
    // NOTE: If packet is part of a sequence, then this function is already part of a sequence thread
    // and all packets are already in state SEQACT. Otherwise we set the packet in state ACTIVE.
    if (!mops_is_seqact(mp))
        mops_set_active (mp);
    /////////////////////////////

    // infinite or not? Count up or down?
    if (mp->count == 0) {
        infinity = 1;
        mp->cntx = 0;
    }
    else {
        infinity = 0;
        mp->cntx = mp->count; // count down
    }

    // Which delay?
    tv.tv_sec = mp->ndelay.tv_sec;
    tv.tv_nsec = mp->ndelay.tv_nsec;

    // Which interface?
    for (i=0; i<device_list_entries; i++) {
        if (strncmp(device_list[i].dev, mp->device, 15)==0) break;
    }
    devind=i;

    // Packet socket already existing and valid?
    ps = device_list[devind].ps; // the packet socket
    if (ps<0) goto FIN;

    // Automatic direct or indirect delivery for IP packets?
    if ((mp->use_IP) && (mp->auto_delivery_off == 0)) {
        if (mp->ip_dst_isrange)
            mops_hton4(&mp->ip_dst_start, DA);
        else
            mops_hton4(&mp->ip_dst, DA);

        mops_ip_get_dst_mac(&device_list[devind], DA, mp->eth_dst);
    }

    // Impossible settings
    if (((ip_src_isrange) && (ip_src_israndom)) ||
        ((sp_isrand) && (sp_isrange)) ||
        ((dp_isrand) && (dp_isrange))) {
        fprintf(stderr, "[ERROR] (mops_tx_thread_native) -- conflicting requirements: both range and random!\n");
        goto FIN;
    }

    // Initialize start values when ranges have been defined
    if (ip_src_isrange) mp->ip_src = mp->ip_src_start;
    if (ip_dst_isrange) mp->ip_dst = mp->ip_dst_start;
    if (sp_isrange) mp->sp = mp->sp_start;
    if (dp_isrange) mp->dp = mp->dp_start;
    if (tcp_seq_delta) {
        tcp_seq_range = mops_tcp_complexity_sqnr(mp);
        mp->tcp_seq = mp->tcp_seq_start;
        tcp_seq_count = tcp_seq_range;
    }
    if (tcp_ack_delta) {
        tcp_ack_range = mops_tcp_complexity_acknr(mp);
        mp->tcp_ack = mp->tcp_ack_start;
        tcp_ack_count = tcp_ack_range;
    }

    // RTP special message treatment
    if (mp->p_desc_type == MOPS_RTP) {
        pd = mp->p_desc;
        if (pd==NULL) return NULL;
        if (pd->source == DSP_SOURCE)
            rtp_mode = 2; // dsp payload
        else
            rtp_mode = 1; // zero payload

        mops_update_rtp (mp); // initialize RTP packet here
    }

    // TODO: VLAN, MPLS - ranges

    // ---------------------- The holy transmission loop ---------------- //

    // Update whole packet (once before loop!)
    mops_ext_update (mp);
    mops_update(mp);

    // Check if IP fragmentation is desired.
    // If yes, set local 'fragsize' and 'begin_ip_payload' pointer.
    if (mp->ip_fragsize) {
        if (mp->use_IP) {
            fragsize = mp->ip_fragsize;
            frag_overlap = mp->ip_frag_overlap;
            offset = mp->ip_frag_offset;
            offset_delta = (fragsize-frag_overlap)/8;
            if (mp->use_UDP) {
                begin_ip_payload = mp->begin_UDP;
                whats_used = 1;
            } else if (mp->use_TCP) {
                begin_ip_payload = mp->begin_TCP;
                whats_used = 2;
            } else {
                begin_ip_payload = mp->begin_MSG;
                whats_used = 0;
            }
            ip_payload_s = mp->frame_s - begin_ip_payload;
            memcpy((void*) original_msg, (void*) mp->msg, mp->msg_s);
            original_msg_s = mp->msg_s;
            memcpy((void*) ip_payload, (void*) &mp->frame[begin_ip_payload], ip_payload_s);
        }
    }

    goto START; // looks like a dirty hack but reduces a few cpu cycles each loop
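
    // The loop below has two entry points: START is used only for the very
    // first packet (so the initial nanosleep is skipped), while the modifiers
    // at the bottom jump back to INLOOP, which applies the configured
    // inter-packet delay 'tv' before the next transmission.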

    do {
    INLOOP:
        nanosleep(&tv, NULL); // don't apply this before first and after last packet.
    START:

        // +++++++++++++++++++++++++++++++++++

        // ------ IP fragmentation required? ------------------------------------------------------
        //
        // Basic idea: At this point we assume that all updates have already been applied,
        // so mp->frame contains a valid packet. But now we do the following:
        //
        //  1. Determine first byte after end of IP header (IP options may be used) [done above]
        //  2. Store the 'IP payload' in the temporary buffer 'ip_payload' [done above]
        //  3. Create a new IP payload but take only the first fragsize bytes out of 'ip_payload'
        //  4. This new IP payload is copied into mp->msg
        //  5. Set the IP parameters: MF=1, offset=0
        //  6. Call mops_update() and send the packet
        //  7. offset = offset + fragsize/8
        //  8. Increment the IP identification number
        //  9. Repeat this until the last fragment is reached. For the last fragment
        //     set the flag MF=0.
        // 10. Restore the original IP parameters (use_UDP or use_TCP)
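        //
        // Worked example (illustration only): with ip_payload_s = 1400 bytes,
        // fragsize = 576, no overlap and mp->ip_frag_offset starting at 0,
        // offset_delta = 576/8 = 72, so three fragments are sent: 576 bytes at
        // offset 0 (MF=1), 576 bytes at offset 72 (MF=1), and the remaining
        // 248 bytes at offset 144 (MF=0). (Fragment offsets count 8-byte units.)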

        if (fragsize) {
            mp->use_UDP=0;
            mp->use_TCP=0;
            fragptr=0; // NOTE: by intention we do not set mp->ip_frag_offset to 0 here !!! The user knows what she does!
            mp->ip_flags_MF=1;
            mp->ip_id++; // automatically wraps around correctly (u_int16_t)
            // send all fragments except the last one:
            while (fragptr+fragsize < ip_payload_s) {
                memcpy((void*) mp->msg, (void*) ip_payload+fragptr, fragsize);
                mp->msg_s = fragsize;
                mops_update(mp);
                n = write(ps, mp->frame, mp->frame_s);
                if (n!=mp->frame_s) {
                    fprintf(stderr, "ERROR: Could not send IP fragment through interface %s\n", mp->device);
                    // LOG error msg
                    goto FIN;
                }
                fragptr+=fragsize;
                mp->ip_frag_offset += offset_delta;
            }
            // send last fragment:
            mp->ip_flags_MF=0;
            memcpy((void*) mp->msg, (void*) ip_payload+fragptr, ip_payload_s-fragptr);
            mp->msg_s = ip_payload_s-fragptr;
            mops_update(mp);
            n = write(ps, mp->frame, mp->frame_s);
            if (n!=mp->frame_s) {
                fprintf(stderr, "ERROR: Could not send IP fragment through interface %s\n", mp->device);
                // LOG error msg
                goto FIN;
            }

            // -- restore original mops parameters --
            switch (whats_used) {
                case 1: mp->use_UDP = 1; break;
                case 2: mp->use_TCP = 1; break;
            }
            memcpy((void*) mp->msg, (void*) original_msg, original_msg_s);
            mp->msg_s = original_msg_s;
            mp->ip_frag_offset=offset;
            goto NEXT;
        }

        // -- send unfragmented packets here: --
        n = write(ps, mp->frame, mp->frame_s);
        if (n!=mp->frame_s) {
            fprintf(stderr, "ERROR: Could not send packet through interface %s\n", mp->device);
            // LOG error msg
            goto FIN;
        }

    NEXT:

        /* [ RTP TODO: ] Use another thread reading from /dev/dsp and signalling us to continue!
         * It should work like this: (pseudocode below)
         *
         *   if (rtp_mode == DSP_SOURCE) {
         *        pthread_cond_wait ( &mycond, &mymutex ); // wait until pthread condition is signaled
         *        // now, frame should contain 160 bytes from /dev/dsp
         *        goto INLOOP;
         *   }
         *
         * The reading thread will do something like this: (again fuzzy code only)
         *
         *   loop:
         *     read(fd, pd->rtp_payload, 160);   // this takes 20 msec anyway
         *     mops_update_rtp_dynamics (mp);    // also updates dynamic header fields
         *     pthread_cond_broadcast (&mycond); // wake up TX thread
         *     goto loop;
         *
         * See also
         * http://www.oreilly.de/catalog/multilinux/excerpt/ch14-05.htm
         *
         * NOTE that we must not reach nanosleep below because the 20 msec delay is
         * done implicitly by reading 160 bytes from /dev/dsp
         */

        switch (rtp_mode) {
            case 1: // dummy payload => segmentation delay is controlled by nanosleep below!
                mops_update_rtp_dynamics (mp);
                break;
            case 2: // await data from /dev/dsp => segmentation delay is controlled by a reading thread!
                /* pthread_cond_wait ( &mycond, &mymutex ); // wait until pthread condition is signaled
                 * // now, frame should contain 160 bytes from /dev/dsp
                 * goto INLOOP;
                 */
                break;
            default:
                // no RTP, continue as usual
                break;
        }

        // +++++++++++++++++++++++++++++++++++

        // *** begin of modifiers -- order is important! *** *************** //
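        //
        // The modifiers work like an odometer: the first configured modifier
        // (TCP SEQNR) varies fastest, and only when it wraps around does the
        // next one (TCP ACKNR, then source/destination IP and ports) advance,
        // because every non-wrapping branch jumps straight back to INLOOP.
        // Illustration only: with a destination port range 80..81 and a TCP
        // sequence number range of three values s1..s3, one full cycle sends
        // (80,s1) (80,s2) (80,s3) (81,s1) (81,s2) (81,s3).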

        if (tcp_seq_delta) {
            if (--tcp_seq_count) {
                mp->tcp_seq += tcp_seq_delta;
                mops_update(mp);
                goto INLOOP;
            } else {
                tcp_seq_count = tcp_seq_range;
                mp->tcp_seq = mp->tcp_seq_start;
                mops_update(mp);
            }
        }

        if (tcp_ack_delta) {
            if (--tcp_ack_count) {
                mp->tcp_ack += tcp_ack_delta;
                mops_update(mp);
                goto INLOOP;
            } else {
                tcp_ack_count = tcp_ack_range;
                mp->tcp_ack = mp->tcp_ack_start;
                mops_update(mp);
            }
        }

        if (ip_src_isrange) {
            if (++mp->ip_src > ip_src_stop) {
                mp->ip_src = ip_src_start;
                mops_update(mp);
            }
            else {
                mops_update(mp);
                goto INLOOP;
            }
        }

        if (ip_src_israndom) {
            mp->ip_src = 0x01000001 + (u_int32_t) ( ((float) rand()/RAND_MAX)*0xE0000000); //latter is 224.0.0.0
            mops_update(mp); // propagate the new source address into the frame
        }

        if (ip_dst_isrange) {
            if (++mp->ip_dst > ip_dst_stop) {
                mp->ip_dst = ip_dst_start;
                if (mp->auto_delivery_off == 0) {
                    // map the (multicast) destination IP to its MAC address: 01:00:5e + lower 23 bits
                    mops_hton4(&mp->ip_dst, DA);
                    mp->eth_dst[0] = 0x01;
                    mp->eth_dst[1] = 0x00;
                    mp->eth_dst[2] = 0x5e;
                    mp->eth_dst[3] = DA[1] & 127;
                    mp->eth_dst[4] = DA[2];
                    mp->eth_dst[5] = DA[3];
                }
                mops_update(mp);
            }
            else {
                if (mp->auto_delivery_off == 0) {
                    mops_hton4(&mp->ip_dst, DA);
                    mp->eth_dst[0] = 0x01;
                    mp->eth_dst[1] = 0x00;
                    mp->eth_dst[2] = 0x5e;
                    mp->eth_dst[3] = DA[1] & 127;
                    mp->eth_dst[4] = DA[2];
                    mp->eth_dst[5] = DA[3];
                }
                mops_update(mp);
                goto INLOOP;
            }
        }

        if (dp_isrange) {
            if (++mp->dp > dp_stop) {
                mp->dp = dp_start;
                mops_update(mp);
            }
            else {
                mops_update(mp);
                goto INLOOP;
            }
        }

        if (dp_isrand) {
            mp->dp = (u_int16_t) ( ((float) rand()/RAND_MAX)*0xffff);
            mops_update(mp); // propagate the new destination port into the frame
        }

        if (sp_isrange) {
            if (++mp->sp > sp_stop) {
                mp->sp = sp_start;
                mops_update(mp);
            }
            else {
                mops_update(mp);
                goto INLOOP;
            }
        }

        if (sp_isrand) {
            mp->sp = (u_int16_t) ( ((float) rand()/RAND_MAX)*0xffff);
            mops_update(mp); // propagate the new source port into the frame
        }

        // *** end of modifiers ******************************************** //

        if (infinity) {
            mp->cntx++; // count up
            goto INLOOP;
        }

    } while (--mp->cntx);

 FIN:
    if (!mops_is_seqact(mp)) {
        // only [change state and close thread] if packet is NOT part of a sequence.
        // If the packet is part of a sequence then THIS function is already part of
        // a sequence thread and it will be closed in 'mops_sequence_thread'.
        mops_set_conf (mp);
        pthread_exit(NULL);
    }

    return NULL;
}

int mops_destroy_thread (struct mops *mp)
{
    int r=1;

    if (mp->interval_used==2) {
        pthread_cancel(mp->interval_thread);
        mp->interval_used=1;
        r=0;
    }

    if (mops_is_active(mp)) {
        pthread_cancel(mp->mops_thread);
        mops_set_conf(mp); // reset state while the mutex is still valid
        pthread_mutex_destroy(& mp->mops_mutex);
        r=0;
    }

    return r;
}
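
// Usage note: callers stop a running packet with mops_destroy_thread(); the
// return value 1 only indicates that neither an interval thread nor an active
// transmission thread had to be cancelled.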