/*
This source code file is part of thread_mpi.
Written by Sander Pronk, Erik Lindahl, and possibly others.

Copyright (c) 2009, Sander Pronk, Erik Lindahl.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3) Neither the name of the copyright holders nor the
   names of its contributors may be used to endorse or promote products
   derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY US ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL WE BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

If you want to redistribute modifications, please consider that
scientific software is very special. Version control is crucial -
bugs must be traceable. We will be happy to consider code for
inclusion in the official distribution, but derived work should not
be called official thread_mpi. Details are found in the README & COPYING
files.
*/
/* this is the header file for the implementation side of the thread_mpi
   library. It contains the definitions for all the internal data structures
   and the prototypes for all the internal functions that aren't static. */
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include "settings.h"
#include "thread_mpi/atomic.h"
#include "thread_mpi/threads.h"
#include "thread_mpi/event.h"
#include "thread_mpi/tmpi.h"
#include "thread_mpi/collective.h"
#include "thread_mpi/barrier.h"
#include "thread_mpi/hwinfo.h"
#include "thread_mpi/lock.h"
#ifdef TMPI_PROFILE
#include "profile.h"
#endif
/**************************************************************************

   BASIC DEFINITIONS

 **************************************************************************/

typedef int tmpi_bool;
#define TRUE 1
#define FALSE 0
#ifdef USE_COLLECTIVE_COPY_BUFFER
/**************************************************************************

   PRE-ALLOCATED COMMUNICATION BUFFERS

 **************************************************************************/

/* Buffer structure for collective communications. Every thread structure
   has several of these ready to be used when the collective data
   transmission is small enough for double copying to occur (i.e. the size
   of the transmission is less than N*MAX_COPY_BUFFER_SIZE, where N is the
   number of receiving threads). */
struct copy_buffer
{
    void *buf;                /* the actual buffer */
    struct copy_buffer *next; /* pointer to next free buffer in buffer_list */
    size_t size;              /* allocated size of buffer */
};

/* a list of copy_buffers of a specific size. */
struct copy_buffer_list
{
    struct copy_buffer *cb;       /* pointer to the first copy_buffer */
    size_t size;                  /* allocated size of buffers in this list */
    struct copy_buffer *cb_alloc; /* list as allocated */
    int Nbufs;                    /* number of allocated buffers */
};
#endif
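
/* A minimal illustrative sketch (kept disabled): the double-copy size test
   described in the comment above. The helper name is hypothetical, and
   MAX_COPY_BUFFER_SIZE is assumed to be available from settings.h. */
#if 0
static tmpi_bool tMPI_Use_copy_buffer_sketch(size_t bufsize, int N_recv)
{
    /* double copying only pays off for small transmissions: one extra
       copy into a shared buffer lets the sender continue immediately. */
    return (bufsize < (size_t)N_recv*MAX_COPY_BUFFER_SIZE);
}
#endif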
/**************************************************************************

   POINT-TO-POINT COMMUNICATION DATA STRUCTURES

 **************************************************************************/

/* the message envelopes (as described in the MPI standard).
   These fully describe the message, and make each message unique (enough).

   Transmitting data works by having the sender put a pointer to an envelope
   onto the receiver's new envelope list corresponding to the originating
   thread.
   The sender then waits until the receiver finishes the transmission, while
   matching all incoming new envelopes against its own list of receive
   envelopes.

   The receiver either directly matches its receiving envelope against
   all previously un-matched sending envelopes, or, if no suitable envelope
   is found, it puts the receive envelope on a receive list.
   While waiting for completion, the receiver matches against all incoming
   new envelopes. */
/* the state of an individual point-to-point transmission */
enum envelope_state
{
    env_unmatched    = 0, /* the envelope has not had a match yet */
    env_copying      = 1, /* busy copying (only used for send envelope
                             by receiver if using_cb is true,
                             but cb was still NULL). */
    env_cb_available = 2, /* the copy buffer is available. Set by
                             the sender on a send_buffer. */
    env_finished     = 3  /* the transmission has finished */
};
/* the envelope. Held in tmpi_thread->evs[src_thread] for send envelopes,
   or in tmpi_thread->evr for receive envelopes */
struct envelope
{
    int tag;                        /* the tag */
    tMPI_Comm comm;                 /* this is a structure shared across threads, so we
                                       can test easily whether two threads are talking
                                       about the same comm. */

    struct tmpi_thread *src, *dest; /* the sending and receiving threads */

    void *buf;                      /* buffer to be sent */
    size_t bufsize;                 /* the size of the data to be transmitted */
    tMPI_Datatype datatype;         /* the data type */

    tmpi_bool nonblock;             /* whether the receiver is non-blocking */

    /* state, values from enum envelope_state.
       (there's a few busy-waits relying on this flag).
       state=env_unmatched is the initial state. */
    tMPI_Atomic_t state;

    /* the error condition */
    int error;

    /* the message status */
    /*tMPI_Status *status;*/

    /* prev and next envelopes in the send/recv_envelope_list linked list */
    struct envelope *prev, *next;

    tmpi_bool send;                 /* whether this is a send envelope (if TRUE), or a receive
                                       envelope (if FALSE) */
#ifdef USE_SEND_RECV_COPY_BUFFER
    tmpi_bool using_cb;             /* whether a copy buffer is (going to be) used */
    void *cb;                       /* the allocated copy buffer pointer */
#endif
    /* the next and previous envelopes in the request list */
    struct envelope *prev_req, *next_req;

    /* the list I'm in */
    struct recv_envelope_list *rlist;
    struct send_envelope_list *slist;
};
/* singly linked lists of free send & receive envelopes belonging to a
   thread. */
struct free_envelope_list
{
    struct envelope *head_recv;       /* the first element in the linked list */
    struct envelope *recv_alloc_head; /* the allocated recv list */
};
/* collection of send envelopes to a specific thread */
struct send_envelope_list
{
    struct envelope *head_free;  /* singly linked list with free send
                                    envelopes. A single-thread LIFO. */
#ifdef TMPI_LOCK_FREE_LISTS
    tMPI_Atomic_ptr_t head_new;  /* singly linked list with the new send
                                    envelopes (i.e. those that are put there by
                                    the sending thread, but not yet checked by
                                    the receiving thread). This is a lock-free
                                    shared detachable list. */
    tMPI_Atomic_ptr_t head_rts;  /* singly linked list with free send
                                    envelopes returned by the other thread.
                                    This is a lock-free shared LIFO. */
#else
    struct envelope *head_new;   /* singly linked list with the new send
                                    envelopes (i.e. those that are put there by
                                    the sending thread, but not yet checked by
                                    the receiving thread). */
    struct envelope *head_rts;   /* singly linked list with free send envelopes */
    tMPI_Spinlock_t lock_new;    /* this locks head_new */
    tMPI_Spinlock_t lock_rts;    /* this locks head_rts */
#endif
    struct envelope *head_old;   /* the old send envelopes, in a circular doubly
                                    linked list. These have been checked by the
                                    receiving thread against the existing
                                    recv_envelope_list. */

    struct envelope *alloc_head; /* the allocated send list */
    size_t Nalloc;               /* number of allocated sends */
};
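
/* A minimal illustrative sketch (kept disabled): returning a used send
   envelope to the lock-free head_rts LIFO above. This assumes
   tMPI_Atomic_ptr_cas(a, old, new) from thread_mpi/atomic.h atomically
   replaces *a if it still equals old and returns nonzero on success;
   the helper name is hypothetical. */
#if 0
#ifdef TMPI_LOCK_FREE_LISTS
static void tMPI_Send_env_list_return_sketch(struct send_envelope_list *evl,
                                             struct envelope *ev)
{
    struct envelope *head;
    do
    {
        /* re-read the head and link ourselves in front of it; retry if
           another thread pushed in between. */
        head = (struct envelope*)tMPI_Atomic_ptr_get(&(evl->head_rts));
        ev->next = head;
    }
    while (!tMPI_Atomic_ptr_cas(&(evl->head_rts), head, ev));
}
#endif
#endif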
struct recv_envelope_list
{
    struct envelope *head;  /* first envelope in this list */
    struct envelope dummy;  /* the dummy element for the list */
};
/* the request object for asynchronous operations. */
struct tmpi_req_
{
    tmpi_bool finished;            /* whether it's finished */
    struct envelope *ev;           /* the envelope */

    struct tmpi_thread *source;    /* the message source (for receives) */
    tMPI_Comm comm;                /* the comm */
    int tag;                       /* the tag */
    int error;                     /* error code */
    size_t transferred;            /* the number of transferred bytes */
    tmpi_bool cancelled;           /* whether the transmission was cancelled */

    struct tmpi_req_ *next, *prev; /* next, prev request in linked list,
                                      used in the req_list, but also in
                                      tMPI_Test_mult(). */
};
/* pre-allocated request object list */
struct req_list
{
    struct tmpi_req_ *head;       /* pre-allocated singly linked list of requests.
                                     (i.e. reqs->prev is undefined). */
    struct tmpi_req_ *alloc_head; /* the allocated block */
};
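
/* A minimal illustrative sketch (kept disabled): popping a pre-allocated
   request off the singly linked list above (the real code lives elsewhere
   in thread_mpi). Because only 'next' is followed, reqs->prev can indeed
   stay undefined here. */
#if 0
static struct tmpi_req_ *tMPI_Req_list_pop_sketch(struct req_list *rl)
{
    struct tmpi_req_ *req = rl->head;
    if (req != NULL)
    {
        rl->head = req->next;
    }
    return req;
}
#endif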
/**************************************************************************

   MULTICAST COMMUNICATION DATA STRUCTURES

 **************************************************************************/

/* these are data structures meant for keeping track of multicast operations
   (tMPI_Bcast, tMPI_Gather, etc.). Because these operations are all collective
   across the comm, and are always blocking, the protocol can be much simpler
   than that for point-to-point communication through tMPI_Send/Recv, etc. */

/* unique tags for multicast & collective operations */
#define TMPI_BCAST_TAG      1
#define TMPI_GATHER_TAG     2
#define TMPI_GATHERV_TAG    3
#define TMPI_SCATTER_TAG    4
#define TMPI_SCATTERV_TAG   5
#define TMPI_REDUCE_TAG     6
#define TMPI_ALLTOALL_TAG   7
#define TMPI_ALLTOALLV_TAG  8
/* thread-specific part of the coll_env */
struct coll_env_thread
{
    tMPI_Atomic_t current_sync;  /* sync counter value for the current
                                    communication */
    tMPI_Atomic_t n_remaining;   /* remaining threads count for each thread */

    int tag;                     /* collective communication type */
    tMPI_Datatype datatype;      /* datatype */

    void **buf;                  /* array of send/recv buffer values */
    size_t *bufsize;             /* array of number of bytes to send/recv */

#ifdef USE_COLLECTIVE_COPY_BUFFER
    tmpi_bool using_cb;          /* whether a copy buffer is (going to be) used */
    tMPI_Atomic_t buf_readcount; /* Number of threads reading from buf
                                    while using_cb is true, but cpbuf
                                    is still NULL. */
    tMPI_Atomic_ptr_t *cpbuf;    /* copy_buffer pointers. */
    struct copy_buffer *cb;      /* the copy buffer cpbuf points to */
#endif

    tMPI_Event send_ev;          /* event associated with being the sending thread.
                                    Triggered when last receiving thread is ready,
                                    and the coll_env_thread is ready for re-use. */
    tMPI_Event recv_ev;          /* event associated with being a receiving thread. */

    tmpi_bool *read_data;        /* whether we read data from a specific thread. */
};
/* Synchronization data for 'once' collective communications. These run in
   parallel with the collection of coll_env_threads. */
struct coll_env_coll
{
    /* collective sync data */
    tMPI_Atomic_t current_sync; /* sync counter value for the current
                                   communication */
    tMPI_Atomic_t n_remaining;  /* remaining threads count */

    void *res;                  /* result data for once calls. */
};
/* the collective communication envelope. There's a few of these per
   comm, and each one stands for one collective communication call. */
struct coll_env
{
    struct coll_env_thread *met; /* thread-specific collective envelope data. */

    struct coll_env_coll coll;
    int N;
};
/* multicast synchronization data structure. There's one of these for
   each thread in each tMPI_Comm structure */
struct coll_sync
{
    int synct;          /* sync counter for coll_env_thread. */
    int syncs;          /* sync counter for coll_env_coll. */

    tMPI_Event *events; /* one event for each other thread */
    int N;              /* the number of threads */
};
/**************************************************************************

   THREAD DATA STRUCTURES

 **************************************************************************/

/* information about a running thread. This structure is put in a
   globally available array; the envelope exchange, etc. are all done through
   the elements of this array. */
struct tmpi_thread
{
    tMPI_Thread_t thread_id; /* this thread's id */

    /* p2p communication structures: */

    /* the receive envelopes posted for other threads to check */
    struct recv_envelope_list evr;
    /* the send envelopes posted by other threads */
    struct send_envelope_list *evs;
    /* free send and receive envelopes */
    struct free_envelope_list envelopes;
    /* number of finished send envelopes */
    tMPI_Atomic_t ev_outgoing_received;
    /* the p2p communication events (incoming envelopes + finished send
       envelopes generate events) */
    tMPI_Event p2p_event;
    TMPI_YIELD_WAIT_DATA     /* data associated with waiting */
    struct req_list rql;     /* list of pre-allocated requests */

    /* collective communication structures: */
#ifdef USE_COLLECTIVE_COPY_BUFFER
    /* copy buffer list for multicast communications */
    struct copy_buffer_list cbl_multi;
#endif

    /* miscellaneous data: */

    tMPI_Comm self_comm;     /* comm for MPI_COMM_SELF */
#ifdef TMPI_PROFILE
    /* the per-thread profile structure that keeps call counts & wait times. */
    struct tmpi_profile profile;
#endif
    /* The start function (or NULL, if a main()-style start function is to
       be called) */
    void (*start_fn)(void*);
    /* The main()-style start function */
    int (*start_fn_main)(int, char**);
    /* the argument to the start function, if it's not main() */
    void *start_arg;

    /* we copy these for each thread (providing these to main() is not
       required by the MPI standard, but it's convenient). Note that we copy,
       because some programs (like Gromacs) like to manipulate these. */
    int argc;
    char **argv;
};
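
/* A minimal illustrative sketch (kept disabled): how a newly started thread
   could dispatch on the two start-function fields above. The helper is
   hypothetical; the real startup code lives elsewhere in the library. */
#if 0
static void tMPI_Thread_start_sketch(struct tmpi_thread *th)
{
    if (th->start_fn != NULL)
    {
        th->start_fn(th->start_arg);           /* library-style entry point */
    }
    else
    {
        th->start_fn_main(th->argc, th->argv); /* main()-style entry point */
    }
}
#endif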
/**************************************************************************

   ERROR HANDLER DATA STRUCTURES

 **************************************************************************/

/* the error handler */
struct tmpi_errhandler_
{
    int err;
    tMPI_Errhandler_fn fn;
};

/* standard error handler functions */
void tmpi_errors_are_fatal_fn(tMPI_Comm *comm, int *err);
void tmpi_errors_return_fn(tMPI_Comm *comm, int *err);
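
/* A minimal illustrative sketch (kept disabled): a user-defined handler with
   the same shape as the standard handler functions declared above; the name
   is hypothetical. */
#if 0
static void tmpi_errors_print_fn_sketch(tMPI_Comm *comm, int *err)
{
    fprintf(stderr, "thread_mpi error %d on comm %p\n", *err, (void*)comm);
}
#endif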
/**************************************************************************

   GLOBAL DATA STRUCTURE

 **************************************************************************/

/* global MPI information */
struct tmpi_global
{
    /* list of pointers to all user-defined types */
    struct tmpi_datatype_ **usertypes;
    int N_usertypes;
    int Nalloc_usertypes;

    /* spinlock/mutex for manipulating tmpi_user_types */
    tMPI_Spinlock_t datatype_lock;

    /* barrier for tMPI_Finalize(), etc. */
    tMPI_Thread_barrier_t barrier;

    /* the timer for tMPI_Wtime() */
    tMPI_Thread_mutex_t timer_mutex;
#if ! (defined( _WIN32 ) || defined( _WIN64 ) )
    /* the time at initialization. */
    struct timeval timer_init;
#else
    /* the time at initialization. */
    DWORD timer_init;
#endif
};
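
/* A minimal illustrative sketch (kept disabled): deriving tMPI_Wtime()-style
   elapsed wall time from timer_init on POSIX systems. The helper is
   hypothetical and skips the timer_mutex locking the real code would need. */
#if 0
#if ! (defined( _WIN32 ) || defined( _WIN64 ) )
static double tMPI_Wtime_sketch(struct tmpi_global *g)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)(now.tv_sec  - g->timer_init.tv_sec) +
           (double)(now.tv_usec - g->timer_init.tv_usec)*1e-6;
}
#endif
#endif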
/**************************************************************************

   COMMUNICATOR DATA STRUCTURES

 **************************************************************************/

struct tmpi_group_
{
    int N;                      /* the number of threads */
    struct tmpi_thread **peers; /* the list of peers to communicate with */
#if 0
    int Nrefs;                  /* the number of references to this structure */
#endif
};
/* the communicator objects are globally shared. */
struct tmpi_comm_
{
    struct tmpi_group_ grp;  /* the communicator group */

    /* the barrier for tMPI_Barrier() */
    tMPI_Barrier_t barrier;

    /* List of barriers for reduce operations (a usage sketch follows this
       struct definition).
       reduce_barrier[0] contains a list of N/2 barriers for N threads
       reduce_barrier[1] contains a list of N/4 barriers for N/2 threads
       reduce_barrier[2] contains a list of N/8 barriers for N/4 threads
       and so on. (until N/x reaches 1)
       This is to facilitate tree-based algorithms for tMPI_Reduce, etc. */
    tMPI_Barrier_t **reduce_barrier;
    int *N_reduce;           /* the number of barriers in each iteration */
    int N_reduce_iter;       /* the number of iterations */

    struct coll_env *cev;    /* list of multicast envelope objects */
    struct coll_sync *csync; /* list of multicast sync objects */

    /* lists of globally shared send/receive buffers for tMPI_Reduce. */
    tMPI_Atomic_ptr_t *reduce_sendbuf, *reduce_recvbuf;

    /* mutex for communication object creation. Traditional mutexes are
       better here because communicator creation should not be done in
       time-critical sections of code. */
    tMPI_Thread_mutex_t comm_create_lock;
    tMPI_Thread_cond_t comm_create_prep;
    tMPI_Thread_cond_t comm_create_finish;

    tMPI_Comm *new_comm;     /* newly created communicators */

    /* the split structure is shared among the comm threads and is
       allocated & deallocated during tMPI_Comm_split */
    struct tmpi_split *split;

    /* the topologies (only cartesian topology is currently implemented) */
    struct cart_topol *cart;
    /*struct tmpi_graph_topol_ *graph;*/

    tMPI_Errhandler erh;

    /* links for a global circular list of all comms that starts at
       TMPI_COMM_WORLD. Used to de-allocate the comm structures after
       tMPI_Finalize(). */
    struct tmpi_comm_ *next, *prev;
};
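
/* A minimal illustrative sketch (kept disabled): how the reduce_barrier tree
   above can drive a tree-based reduction. At each iteration the surviving
   even-numbered ranks pair up on a barrier with their odd neighbours, so the
   number of active threads halves per level. Assumes tMPI_Barrier_wait()
   from thread_mpi/barrier.h; the helper itself is hypothetical. */
#if 0
static void tMPI_Reduce_tree_sketch(struct tmpi_comm_ *comm, int myrank)
{
    int i, rank = myrank;
    for (i = 0; i < comm->N_reduce_iter; i++)
    {
        tMPI_Barrier_wait(&(comm->reduce_barrier[i][rank/2]));
        if (rank % 2 != 0)
        {
            break; /* odd ranks hand off their data and drop out */
        }
        rank /= 2;
    }
}
#endif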
/* specific for tMPI_Comm_split: */
struct tmpi_split
{
    volatile int Ncol_init;
    volatile int Ncol_destroy;
    volatile tmpi_bool can_finish;
    volatile int *colors;
    volatile int *keys;
};
/* cartesian topology */
struct cart_topol
{
    int ndims;    /* number of dimensions */
    int *dims;    /* procs per coordinate */
    int *periods; /* whether the grid is periodic, per dimension */
};

#if 0
/* graph topology */
struct tmpi_graph_topol_
{
};
#endif
/**************************************************************************

   DATA TYPE DATA STRUCTURES

 **************************************************************************/

/* tMPI_Reduce Op functions */
typedef void (*tMPI_Op_fn)(void*, void*, void*, int);

struct tmpi_datatype_component
{
    struct tmpi_datatype_ *type;
    unsigned int count;
};

/* we don't support datatypes with holes (yet) */
struct tmpi_datatype_
{
    size_t size;                           /* full extent of type. */
    tMPI_Op_fn *op_functions;              /* array of op functions for this datatype */
    int N_comp;                            /* number of components */
    struct tmpi_datatype_component *comps; /* the components */
    tmpi_bool committed;                   /* whether the data type is committed */
};

/* just as a shorthand: */
typedef struct tmpi_datatype_ tmpi_dt;
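
/* A minimal illustrative sketch (kept disabled): an operator with the
   tMPI_Op_fn signature above, assuming a (dest, a, b, count) argument
   ordering; check the built-in op implementations for the real convention. */
#if 0
static void tMPI_Sum_int_sketch(void *dest, void *a, void *b, int count)
{
    int *d = (int*)dest, *x = (int*)a, *y = (int*)b;
    int i;
    for (i = 0; i < count; i++)
    {
        d[i] = x[i] + y[i];
    }
}
#endif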
/**************************************************************************

   GLOBAL VARIABLES

 **************************************************************************/

/* the threads themselves (tmpi_comm only contains lists of pointers to this
   structure) */
extern struct tmpi_thread *threads;
extern int Nthreads;

/* thread info */
extern tMPI_Thread_key_t id_key; /* the key to get the thread id */

/* misc. global information about MPI */
extern struct tmpi_global *tmpi_global;
/**************************************************************************

   FUNCTION PROTOTYPES & MACROS

 **************************************************************************/

#ifdef TMPI_TRACE
void tMPI_Trace_print(const char *fmt, ...);
#endif

/* error-checking malloc/realloc: */
void *tMPI_Malloc(size_t size);
void *tMPI_Realloc(void *p, size_t size);

/* get the current thread structure pointer */
#define tMPI_Get_current() ((struct tmpi_thread*) \
                            tMPI_Thread_getspecific(id_key))

/* get the number of this thread */
/*#define tMPI_This_threadnr() (tMPI_Get_current() - threads)*/

/* get the number of a specific thread. We convert the resulting size_t
   to int, which is unlikely to cause problems in the foreseeable future. */
#define tMPI_Threadnr(th) (int)(th - threads)

/* get thread associated with rank */
#define tMPI_Get_thread(comm, rank) (comm->grp.peers[rank])
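
/* A minimal illustrative sketch (kept disabled): how the macros above combine
   to recover the calling thread's global index, mirroring the disabled
   tMPI_This_threadnr() macro. */
#if 0
static int tMPI_My_threadnr_sketch(void)
{
    struct tmpi_thread *me = tMPI_Get_current();
    return tMPI_Threadnr(me); /* pointer arithmetic on the global array */
}
#endif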
#if 0
/* get the current thread structure pointer */
struct tmpi_thread *tMPI_Get_current(void);
/* get the thread belonging to comm with rank rank */
struct tmpi_thread *tMPI_Get_thread(tMPI_Comm comm, int rank);
#endif
/* handle an error, returning the errorcode */
int tMPI_Error(tMPI_Comm comm, int tmpi_errno);

/* check whether we're the main thread */
tmpi_bool tMPI_Is_master(void);
/* check whether the current process is in a group */
tmpi_bool tMPI_In_group(tMPI_Group group);

/* find the rank of a thread in a comm */
int tMPI_Comm_seek_rank(tMPI_Comm comm, struct tmpi_thread *th);
/* find the size of a comm */
int tMPI_Comm_N(tMPI_Comm comm);

/* allocate a comm object, making space for N threads */
tMPI_Comm tMPI_Comm_alloc(tMPI_Comm parent, int N);
/* de-allocate a comm object */
void tMPI_Comm_destroy(tMPI_Comm comm);
/* allocate a group object */
tMPI_Group tMPI_Group_alloc(void);

/* topology functions */
/* de-allocate a cartesian topology structure. (it is allocated with
   the internal function tMPI_Cart_init()) */
void tMPI_Cart_destroy(struct cart_topol *top);
/* initialize a free envelope list with N envelopes */
void tMPI_Free_env_list_init(struct free_envelope_list *evl, int N);
/* destroy a free envelope list */
void tMPI_Free_env_list_destroy(struct free_envelope_list *evl);

/* initialize a send envelope list */
void tMPI_Send_env_list_init(struct send_envelope_list *evl, int N);
/* destroy a send envelope list */
void tMPI_Send_env_list_destroy(struct send_envelope_list *evl);

/* initialize a recv envelope list */
void tMPI_Recv_env_list_init(struct recv_envelope_list *evl);
/* destroy a recv envelope list */
void tMPI_Recv_env_list_destroy(struct recv_envelope_list *evl);

/* initialize request list */
void tMPI_Req_list_init(struct req_list *rl, int N_reqs);
/* destroy request list */
void tMPI_Req_list_destroy(struct req_list *rl);
/* collective data structure ops */

/* initialize a coll env structure */
void tMPI_Coll_env_init(struct coll_env *mev, int N);
/* destroy a coll env structure */
void tMPI_Coll_env_destroy(struct coll_env *mev);

/* initialize a coll sync structure */
void tMPI_Coll_sync_init(struct coll_sync *msc, int N);
/* destroy a coll sync structure */
void tMPI_Coll_sync_destroy(struct coll_sync *msc);

#ifdef USE_COLLECTIVE_COPY_BUFFER
/* initialize a copy_buffer_list */
void tMPI_Copy_buffer_list_init(struct copy_buffer_list *cbl, int Nbufs,
                                size_t size);
/* destroy a copy_buffer_list */
void tMPI_Copy_buffer_list_destroy(struct copy_buffer_list *cbl);
/* get a copy buffer from a list */
struct copy_buffer *tMPI_Copy_buffer_list_get(struct copy_buffer_list *cbl);
/* return a copy buffer to a list */
void tMPI_Copy_buffer_list_return(struct copy_buffer_list *cbl,
                                  struct copy_buffer *cb);
/* initialize a copy buffer */
void tMPI_Copy_buffer_init(struct copy_buffer *cb, size_t size);
/* destroy a copy buffer */
void tMPI_Copy_buffer_destroy(struct copy_buffer *cb);
#endif
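
/* A minimal illustrative sketch (kept disabled): the intended get/use/return
   cycle for the copy buffer list functions declared above. The helper name
   and the assumption len <= cbl->size are illustrative only. */
#if 0
static void tMPI_Copy_buffer_cycle_sketch(struct copy_buffer_list *cbl,
                                          const void *data, size_t len)
{
    struct copy_buffer *cb = tMPI_Copy_buffer_list_get(cbl);
    memcpy(cb->buf, data, len); /* stage the payload */
    /* ... receiving threads read from cb->buf ... */
    tMPI_Copy_buffer_list_return(cbl, cb);
}
#endif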
/* and we need this prototype */
int main(int argc, char **argv);