/***************************************************************************
 *  Project                     ___| | | |  _ \| |
 *                              | (__| |_| |  _ <| |___
 *                               \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2002, Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at http://curl.haxx.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * $Id: transfer.c,v 1.3 2003-01-14 14:12:37 andy Exp $
 ***************************************************************************/
/* -- WIN32 approved -- */

#include <sys/types.h>

#if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)

#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#include <netinet/in.h>
#include <sys/resource.h>
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#include <sys/ioctl.h>

#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>

#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>

#error "We can't compile without select() support!"
#error "We can't compile without socket() support!"

#include <curl/curl.h>
#include <curl/types.h>
#include "content_encoding.h" /* content encoding support. 08/27/02 jhrg */
#include "speedcheck.h"

#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>

/* The last #include file should be: */
#include "memdebug.h"

#define min(a, b)   ((a) < (b) ? (a) : (b))
/* We keep this static and global since this is read-only and NEVER
   changed. It should just remain a blanked-out timeout value. */
static struct timeval notimeout={0,0};
/*
 * This function will call the read callback to fill our buffer with data
 */
static int fillbuffer(struct connectdata *conn,
                      int bytes)
{
  int buffersize = bytes;

  if(conn->bits.upload_chunky) {
    /* if chunked Transfer-Encoding */
    buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
    conn->upload_fromhere += 10; /* 32bit hex + CRLF */
  }

  nread = conn->fread(conn->upload_fromhere, 1,
                      buffersize, conn->fread_in);

  if(!conn->bits.forbidchunk && conn->bits.upload_chunky) {
    /* if chunked Transfer-Encoding */
    int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
                          "%x\r\n", nread);
    /* move buffer pointer */
    conn->upload_fromhere -= hexlen;

    /* copy the prefix to the buffer */
    memcpy(conn->upload_fromhere, hexbuffer, hexlen);

    /* append CRLF to the data */
    memcpy(conn->upload_fromhere + nread, "\r\n", 2);

    /* mark this as done once this chunk is transferred */
    conn->keep.upload_done = TRUE;
  }
  return nread;
}
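/* Note on the arithmetic in fillbuffer() above: the "8 + 2 + 2" and "+= 10"
   adjustments reserve room in the upload buffer for the chunked-encoding
   prefix (up to eight hex digits plus CRLF) and the trailing CRLF, so the
   size line formatted with snprintf() can be copied in front of the data the
   read callback produced without moving that data around. */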
/*
 * Returns TRUE if member of the list matches prefix of string
 */
static bool
checkhttpprefix(struct SessionHandle *data,
                const char *s)
{
  struct curl_slist *head = data->set.http200aliases;

  while(head) {
    if (checkprefix(head->data, s))
      return TRUE;
    head = head->next;
  }

  if(checkprefix("HTTP/", s))
    return TRUE;

  return FALSE;
}
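/* checkhttpprefix() is the hook for CURLOPT_HTTP200ALIASES: servers such as
   Shoutcast ones that answer "ICY 200 OK" do not start their status line
   with "HTTP/", so the user-supplied alias list is consulted before deciding
   whether the received data looks like a header at all. */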
CURLcode Curl_readwrite(struct connectdata *conn,
                        bool *done)
{
  struct Curl_transfer_keeper *k = &conn->keep;
  struct SessionHandle *data = conn->data;
  ssize_t nread; /* number of bytes read */
  fd_set extrareadfd;
  fd_set extrawritefd;

  /* These two are used only if no other select() or _fdset() have been
     invoked before this. This typically happens if you use the multi
     interface and call curl_multi_perform() without calling
     curl_multi_fdset() first. */
  fd_set *readfdp = k->readfdp;
  fd_set *writefdp = k->writefdp;

  if((k->keepon & KEEP_READ) && !readfdp) {
    /* reading is requested, but no socket descriptor pointer was set */
    FD_ZERO(&extrareadfd);
    FD_SET(conn->sockfd, &extrareadfd);
    readfdp = &extrareadfd;

    /* no write, no exceptions, no timeout */
    select(conn->sockfd+1, readfdp, NULL, NULL, &notimeout);
  }
  if((k->keepon & KEEP_WRITE) && !writefdp) {
    /* writing is requested, but no socket descriptor pointer was set */
    FD_ZERO(&extrawritefd);
    FD_SET(conn->writesockfd, &extrawritefd);
    writefdp = &extrawritefd;

    /* no read, no exceptions, no timeout */
    select(conn->writesockfd+1, NULL, writefdp, NULL, &notimeout);
  }
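  /* The two zero-timeout select() calls above are only non-blocking probes:
     they fill a local fd_set so that the FD_ISSET() tests further down always
     have something valid to look at, even when the caller (for instance
     curl_multi_perform() used without a prior curl_multi_fdset()) never
     supplied one. */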
  do {
    /* If we still have reading to do, we check if we have a readable
       socket. Sometimes the readfdp is NULL, if no fd_set was done using
       the multi interface and then we can do nothing but to attempt a
       read to be sure. */
    if((k->keepon & KEEP_READ) &&
       (FD_ISSET(conn->sockfd, readfdp))) {

      bool readdone = FALSE;

      /* This is where we loop until we have read everything there is to
         read or we get an EWOULDBLOCK */
      do {
        result = Curl_read(conn, conn->sockfd, k->buf,
                           data->set.buffer_size ?
                           data->set.buffer_size : BUFSIZE-1,
                           &nread);
        if(result)
          break; /* get out of loop */

        if ((k->bytecount == 0) && (k->writebytecount == 0))
          Curl_pgrsTime(data, TIMER_STARTTRANSFER);

        didwhat |= KEEP_READ;

        /* NULL terminate, allowing string ops to be used */

        /* if we receive 0 or less here, the server closed the connection and
           we bail out from this! */
        else if (0 >= nread) {
          k->keepon &= ~KEEP_READ;
          FD_ZERO(&k->rkeepfd);
        /* Default buffer to use when we write the buffer, it may be changed
           in the flow below before the actual storing is done. */

        /* Since this is a two-state thing, we check if we are parsing
           headers at the moment or not. */
        if (k->header) {
          /* we are in parse-the-header-mode */
          bool stop_reading = FALSE;

          /* header line within buffer loop */
          do {
            /* str_start is start of line within buf */
            k->str_start = k->str;

            k->end_ptr = strchr (k->str_start, '\n');
            if (!k->end_ptr) {
              /* Not a complete header line within buffer, append the data to
                 the end of the headerbuff. */

              if (k->hbuflen + nread >= data->state.headersize) {
                /* We enlarge the header buffer as it is too small */
                long newsize=MAX((k->hbuflen+nread)*3/2,
                                 data->state.headersize*2);
                hbufp_index = k->hbufp - data->state.headerbuff;
                newbuff = (char *)realloc(data->state.headerbuff, newsize);
                if(!newbuff) {
                  failf (data, "Failed to alloc memory for big header!");
                  return CURLE_OUT_OF_MEMORY;
                }
                data->state.headersize=newsize;
                data->state.headerbuff = newbuff;
                k->hbufp = data->state.headerbuff + hbufp_index;
              }
              memcpy(k->hbufp, k->str, nread);
              if (!k->headerline && (k->hbuflen>5)) {
                /* make a first check that this looks like a HTTP header */
                if(!checkhttpprefix(data, data->state.headerbuff)) {
                  /* this is not the beginning of a HTTP first header line */
                  k->badheader = HEADER_ALLBAD;
                }
              }

              break; /* read more and try again */
            }

            /* decrease the size of the remaining buffer */
            nread -= (k->end_ptr - k->str)+1;

            k->str = k->end_ptr + 1; /* move past new line */
            /*
             * We're about to copy a chunk of data to the end of the
             * already received header. We make sure that the full string
             * fit in the allocated header buffer, or else we enlarge it.
             */
            if (k->hbuflen + (k->str - k->str_start) >=
                data->state.headersize) {
              long newsize=MAX((k->hbuflen+
                                (k->str-k->str_start))*3/2,
                               data->state.headersize*2);
              hbufp_index = k->hbufp - data->state.headerbuff;
              newbuff = (char *)realloc(data->state.headerbuff, newsize);
              if(!newbuff) {
                failf (data, "Failed to alloc memory for big header!");
                return CURLE_OUT_OF_MEMORY;
              }
              data->state.headersize = newsize;
              data->state.headerbuff = newbuff;
              k->hbufp = data->state.headerbuff + hbufp_index;
            }

            /* copy to end of line */
            strncpy (k->hbufp, k->str_start, k->str - k->str_start);
            k->hbufp += k->str - k->str_start;
            k->hbuflen += k->str - k->str_start;
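            /* Both enlargement sites above use the same growth rule: the new
               size is whichever is larger of "3/2 of what is needed right
               now" and "double the old size". With rough, illustrative
               numbers: a 256-byte buffer that suddenly has to hold 300 bytes
               is grown to 512 rather than 450, which keeps the number of
               realloc() calls down as headers keep arriving. */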
            k->p = data->state.headerbuff;

            /****
             * We now have a FULL header line that p points to
             *****/

            if (!k->headerline &&       /* the first read header */
                !checkhttpprefix(data, data->state.headerbuff)) {
              /* this is not the beginning of a HTTP first header line */
              k->badheader = HEADER_PARTHEADER;
            }
            if (('\n' == *k->p) || ('\r' == *k->p)) {
              /* Zero-length header line means end of headers! */

              if ('\r' == *k->p)
                k->p++; /* pass the \r byte */
              if ('\n' == *k->p)
                k->p++; /* pass the \n byte */

              if(100 == k->httpcode) {
                /*
                 * we have made a HTTP PUT or POST and this is 1.1-lingo
                 * that tells us that the server is OK with this and ready
                 * to receive our stuff.
                 * However, we'll get more headers now so we must get
                 * back into the header-parsing state!
                 */
                k->headerline = 0; /* restart the header line counter */
                /* if we did wait for this do enable write now! */
                if (k->write_after_100_header) {
                  k->write_after_100_header = FALSE;
                  FD_SET (conn->writesockfd, &k->writefd); /* write */
                  k->keepon |= KEEP_WRITE;
                  k->wkeepfd = k->writefd;
                }
              }
              else
                k->header = FALSE; /* no more header to parse! */

              if (417 == k->httpcode) {
                /*
                 * we got: "417 Expectation Failed" this means:
                 * we have made a HTTP call and our Expect Header
                 * seems to cause a problem => abort the write operations
                 * (or prevent them from starting).
                 */
                k->write_after_100_header = FALSE;
                k->keepon &= ~KEEP_WRITE;
                FD_ZERO(&k->wkeepfd);
              }
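              /* Background for the 100 and 417 handling above: when libcurl
                 has sent "Expect: 100-continue" it holds back the request
                 body. A "100 Continue" reply switches writing back on
                 (KEEP_WRITE), while "417 Expectation Failed" cancels the
                 planned upload instead (RFC 2616, section 8.2.3). */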
              /* now, only output this if the header AND body are requested:
               */
              k->writetype = CLIENTWRITE_HEADER;
              if (data->set.http_include_header)
                k->writetype |= CLIENTWRITE_BODY;

              headerlen = k->p - data->state.headerbuff;

              result = Curl_client_write(data, k->writetype,
                                         data->state.headerbuff,
                                         headerlen);
              if(result)
                return result;

              data->info.header_size += headerlen;
              conn->headerbytecount += headerlen;

              /*
               * really end-of-headers.
               *
               * If we requested a "no body", this is a good time to get
               * out and return home.
               */
              if(data->set.no_body)
                stop_reading = TRUE;
              else if(!conn->bits.close) {
                /* If this is not the last request before a close, we must
                   set the maximum download size to the size of the
                   expected document or else, we won't know when to stop
                   reading! */
                conn->maxdownload = conn->size;

                /* If max download size is *zero* (nothing) we already
                   have nothing and can safely return ok now! */
                if(0 == conn->maxdownload)
                  stop_reading = TRUE;
              }

              if(stop_reading) {
                /* we make sure that this socket isn't read more now */
                k->keepon &= ~KEEP_READ;
                FD_ZERO(&k->rkeepfd);
              }

              break; /* exit header line loop */
            }

            /* We continue reading headers, so reset the line-based
               header parsing variables hbufp && hbuflen */
            k->hbufp = data->state.headerbuff;
            /*
             * Checks for special headers coming up.
             */

            if (!k->headerline++) {
              /* This is the first header, it MUST be the error code line
                 or else we consider this to be the body right away! */
              int httpversion_major;
              int nc=sscanf (k->p, " HTTP/%d.%d %3d",
                             &httpversion_major,
                             &k->httpversion,
                             &k->httpcode);
              if (nc==3) {
                k->httpversion += 10 * httpversion_major;
              }
              else {
                /* this is the real world, not a Nirvana
                   NCSA 1.5.x returns this crap when asked for HTTP/1.1
                */
                nc=sscanf (k->p, " HTTP %3d", &k->httpcode);

                /* If user has set option HTTP200ALIASES,
                   compare header line against list of aliases
                */
                if (checkhttpprefix(data, k->p)) {
                  k->httpversion =
                    (data->set.httpversion==CURL_HTTP_VERSION_1_0)? 10 : 11;
                }
              }

              if (nc) {
                data->info.httpcode = k->httpcode;
                data->info.httpversion = k->httpversion;

                /* 404 -> URL not found! */
                if (data->set.http_fail_on_error &&
                    (k->httpcode >= 400)) {
                  /* If we have been told to fail hard on HTTP-errors,
                     here is the check for that: */
                  /* serious error, go home! */
                  failf (data, "The requested file was not found");
                  return CURLE_HTTP_RETURNED_ERROR;
                }

                if(k->httpversion == 10)
                  /* Default action for HTTP/1.0 must be to close, unless
                     we get one of those fancy headers that tell us the
                     server keeps it open for us! */
                  conn->bits.close = TRUE;

                switch(k->httpcode) {
                case 204:
                  /* (quote from RFC2616, section 10.2.5): The server has
                   * fulfilled the request but does not need to return an
                   * entity-body ... The 204 response MUST NOT include a
                   * message-body, and thus is always terminated by the first
                   * empty line after the header fields. */
                case 304:
                  /* (quote from RFC2616, section 10.3.5): The 304 response MUST
                   * NOT contain a message-body, and thus is always terminated
                   * by the first empty line after the header fields. */
                  break;
                default:
                  break;
                }
              }
              else {
                k->header = FALSE; /* this is not a header line */
              }
            }
            /* check for Content-Length: header lines to get size */
            if (checkprefix("Content-Length:", k->p) &&
                sscanf (k->p+15, " %ld", &k->contentlength)) {
              conn->size = k->contentlength;
              Curl_pgrsSetDownloadSize(data, k->contentlength);
            }
            /* check for Content-Type: header lines to get the mime-type */
            else if (checkprefix("Content-Type:", k->p)) {
              char *start;
              char *end;
              int len;

              /* Find the first non-space letter */
              for(start=k->p+13;
                  *start && isspace((int)*start);
                  start++)
                ;  /* empty loop */

              /* count all non-space letters following */
              for(end=start, len=0;
                  *end && !isspace((int)*end);
                  end++, len++)
                ;  /* empty loop */

              /* allocate memory of a cloned copy */
              data->info.contenttype = malloc(len + 1);
              if (NULL == data->info.contenttype)
                return CURLE_OUT_OF_MEMORY;

              /* copy the content-type string */
              memcpy(data->info.contenttype, start, len);
              data->info.contenttype[len] = 0; /* zero terminate */
            }
            else if((k->httpversion == 10) &&
                    conn->bits.httpproxy &&
                    Curl_compareheader(k->p,
                                       "Proxy-Connection:", "keep-alive")) {
              /*
               * When a HTTP/1.0 reply comes when using a proxy, the
               * 'Proxy-Connection: keep-alive' line tells us the
               * connection will be kept alive for our pleasure.
               * Default action for 1.0 is to close.
               */
              conn->bits.close = FALSE; /* don't close when done */
              infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
            }
            else if((k->httpversion == 10) &&
                    Curl_compareheader(k->p, "Connection:", "keep-alive")) {
              /*
               * A HTTP/1.0 reply with the 'Connection: keep-alive' line
               * tells us the connection will be kept alive for our
               * pleasure. Default action for 1.0 is to close.
               *
               * [RFC2068, section 19.7.1] */
              conn->bits.close = FALSE; /* don't close when done */
              infof(data, "HTTP/1.0 connection set to keep alive!\n");
            }
            else if (Curl_compareheader(k->p, "Connection:", "close")) {
              /*
               * [RFC 2616, section 8.1.2.1]
               * "Connection: close" is HTTP/1.1 language and means that
               * the connection will close when this request has been
               * served.
               */
              conn->bits.close = TRUE; /* close when done */
            }
            else if (Curl_compareheader(k->p,
                                        "Transfer-Encoding:", "chunked")) {
              /*
               * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
               * means that the server will send a series of "chunks". Each
               * chunk starts with a line with info (including size of the
               * coming block) (terminated with CRLF), then a block of data
               * with the previously mentioned size. There can be any amount
               * of chunks, and a chunk-data set to zero signals the
               * end-of-chunks. */
              conn->bits.chunk = TRUE; /* chunks coming our way */

              /* init our chunky engine */
              Curl_httpchunk_init(conn);
            }
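            /* For illustration (made-up data, not from this transfer), a
               chunked response body looks like:
                   19\r\n
                   <25 bytes of payload>\r\n
                   0\r\n
                   \r\n
               "19" is the chunk length in hex (25 decimal) and the
               zero-length chunk terminates the body; Curl_httpchunk_read()
               further down decodes exactly this format. */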
            else if (checkprefix("Content-Encoding:", k->p) &&
                     data->set.encoding) {
              /*
               * Process Content-Encoding. Look for the values: identity,
               * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
               * x-compress are the same as gzip and compress. (Sec 3.5 RFC
               * 2616). zlib cannot handle compress, and gzip is not currently
               * implemented. However, errors are handled further down when
               * the response body is processed 08/27/02 jhrg */
              char *start;

              /* Find the first non-space letter */
              for(start=k->p+17;
                  *start && isspace((int)*start);
                  start++)
                ;  /* empty loop */

              /* Record the content-encoding for later use. 08/27/02 jhrg */
              if (checkprefix("identity", start))
                k->content_encoding = IDENTITY;
              else if (checkprefix("deflate", start))
                k->content_encoding = DEFLATE;
              else if (checkprefix("gzip", start)
                       || checkprefix("x-gzip", start))
                k->content_encoding = GZIP;
              else if (checkprefix("compress", start)
                       || checkprefix("x-compress", start))
                k->content_encoding = COMPRESS;
            }
            else if (checkprefix("Content-Range:", k->p)) {
              if (sscanf (k->p+14, " bytes %d-", &k->offset) ||
                  sscanf (k->p+14, " bytes: %d-", &k->offset)) {
                /* This second format was added August 1st 2000 by Igor
                   Khristophorov since Sun's webserver JavaWebServer/1.1.1
                   obviously sends the header this way! :-( */
                if (conn->resume_from == k->offset) {
                  /* we asked for a resume and we got it */
                  k->content_range = TRUE;
                }
              }
            }
            else if(data->cookies &&
                    checkprefix("Set-Cookie:", k->p)) {
              Curl_cookie_add(data->cookies, TRUE, k->p+11, conn->name);
            }
            else if(checkprefix("Last-Modified:", k->p) &&
                    (data->set.timecondition || data->set.get_filetime) ) {
              time_t secs=time(NULL);
              k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
                                          &secs);
              if(data->set.get_filetime)
                data->info.filetime = k->timeofdoc;
            }
            else if ((k->httpcode >= 300 && k->httpcode < 400) &&
                     (data->set.http_follow_location) &&
                     checkprefix("Location:", k->p)) {
              /* this is the URL that the server advises us to get instead */
              char *ptr;
              char *start=k->p;
              char backup;

              start += 9; /* pass "Location:" */

              /* Skip spaces and tabs. We do this to support multiple
                 white spaces after the "Location:" keyword. */
              while(*start && isspace((int)*start))
                start++;
              ptr = start; /* start scanning here */

              /* scan through the string to find the end */
              while(*ptr && !isspace((int)*ptr))
                ptr++;
              backup = *ptr; /* store the ending letter */

              *ptr = '\0'; /* zero terminate */
              conn->newurl = strdup(start); /* clone string */
              *ptr = backup; /* restore ending letter */
            }
            /*
             * End of header-checks. Write them to the client.
             */

            k->writetype = CLIENTWRITE_HEADER;
            if (data->set.http_include_header)
              k->writetype |= CLIENTWRITE_BODY;

            if(data->set.verbose)
              Curl_debug(data, CURLINFO_HEADER_IN,
                         k->p, k->hbuflen);

            result = Curl_client_write(data, k->writetype, k->p,
                                       k->hbuflen);
            if(result)
              return result;

            data->info.header_size += k->hbuflen;
            conn->headerbytecount += k->hbuflen;

            /* reset hbufp pointer && hbuflen */
            k->hbufp = data->state.headerbuff;
            k->hbuflen = 0;
          }
          while (!stop_reading && *k->str); /* header line within buffer */

          /* We've stopped dealing with input, get out of the do-while loop */

          /* We might have reached the end of the header part here, but
             there might be a non-header part left in the end of the read
             buffer. */

        } /* end if header mode */
        /* This is not an 'else if' since it may be a rest from the header
           parsing, where the beginning of the buffer is headers and the end
           is non-headers. */
        if (k->str && !k->header && (nread > 0)) {

          if(0 == k->bodywrites) {
            /* These checks are only made the first time we are about to
               write a piece of the body */
            if(conn->protocol&PROT_HTTP) {
              /* HTTP-only checks */
              if(conn->newurl) {
                /* abort after the headers if "follow Location" is set */
                infof (data, "Follow to new URL: %s\n", conn->newurl);
                k->keepon &= ~KEEP_READ;
                FD_ZERO(&k->rkeepfd);
              }
              else if (conn->resume_from &&
                       !k->content_range &&
                       (data->set.httpreq==HTTPREQ_GET)) {
                /* we wanted to resume a download, although the server
                   doesn't seem to support this and we did this with a GET
                   (if it wasn't a GET we did a POST or PUT resume) */
                failf (data, "HTTP server doesn't seem to support "
                       "byte ranges. Cannot resume.");
                return CURLE_HTTP_RANGE_ERROR;
              }
              else if(data->set.timecondition && !conn->range) {
                /* A time condition has been set AND no ranges have been
                   requested. This seems to be what chapter 13.3.4 of
                   RFC 2616 defines to be the correct action for a
                   HTTP/1.1 client */
                if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
                  switch(data->set.timecondition) {
                  case TIMECOND_IFMODSINCE:
                    if(k->timeofdoc < data->set.timevalue) {
                      infof(data,
                            "The requested document is not new enough\n");
                    }
                    break;
                  case TIMECOND_IFUNMODSINCE:
                    if(k->timeofdoc > data->set.timevalue) {
                      infof(data,
                            "The requested document is not old enough\n");
                    }
                    break;
                  } /* two valid time strings */
                } /* we have a time condition */
              }
            } /* HTTP-only checks */
          } /* this is the first time we write a body part */
          /* pass data to the debug function before it gets "dechunked" */
          if(data->set.verbose) {
            if(k->badheader) {
              Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
                         k->hbuflen);
              if(k->badheader == HEADER_PARTHEADER)
                Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
            }
            else
              Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
          }

          if(conn->bits.chunk) {
            /*
             * Bless me father for I have sinned. Here comes a chunked
             * transfer flying and we need to decode this properly. While
             * the name says read, this function both reads and writes away
             * the data. The returned 'nread' holds the number of actual
             * data it wrote to the client. */
            CHUNKcode res =
              Curl_httpchunk_read(conn, k->str, nread, &nread);

            if(CHUNKE_OK < res) {
              if(CHUNKE_WRITE_ERROR == res) {
                failf(data, "Failed writing data");
                return CURLE_WRITE_ERROR;
              }
              failf(data, "Received problem in the chunky parser");
              return CURLE_RECV_ERROR;
            }
            else if(CHUNKE_STOP == res) {
              /* we're done reading chunks! */
              k->keepon &= ~KEEP_READ; /* read no more */
              FD_ZERO(&k->rkeepfd);

              /* There are now possibly N number of bytes at the end of the
                 str buffer that weren't written to the client, but we don't
                 care about them right now. */
            }
            /* If it returned OK, we just keep going */
          }
          if((-1 != conn->maxdownload) &&
             (k->bytecount + nread >= conn->maxdownload)) {
            nread = conn->maxdownload - k->bytecount;
            if(nread < 0 ) /* this should be unusual */
              nread = 0;

            k->keepon &= ~KEEP_READ; /* we're done reading */
            FD_ZERO(&k->rkeepfd);
          }

          k->bytecount += nread;

          Curl_pgrsSetDownloadCounter(data, (double)k->bytecount);

          if(!conn->bits.chunk && (nread || k->badheader)) {
            /* If this is chunky transfer, it was already written */

            if(k->badheader) {
              /* we parsed a piece of data wrongly assuming it was a header
                 and now we output it as body instead */
              result = Curl_client_write(data, CLIENTWRITE_BODY,
                                         data->state.headerbuff,
                                         k->hbuflen);
            }
            if(k->badheader < HEADER_ALLBAD) {
              /* This switch handles various content encodings. If there's an
                 error here, be sure to check over the almost identical code
                 in http_chunk.c. 08/29/02 jhrg */
              switch (k->content_encoding) {
              case IDENTITY:
                /* This is the default when the server sends no
                   Content-Encoding header. See Curl_readwrite_init; the
                   memset() call initializes k->content_encoding to zero. */
                result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
                                           nread);
                break;

              case DEFLATE:
                /* Assume CLIENTWRITE_BODY; headers are not encoded. */
                result = Curl_unencode_deflate_write(data, k, nread);
                break;

              case GZIP: /* FIXME 08/27/02 jhrg */
              case COMPRESS:
              default:
                failf (data, "Unrecognized content encoding type. "
                       "libcurl understands `identity' and `deflate' "
                       "content encodings.");
                result = CURLE_BAD_CONTENT_ENCODING;
                break;
              }
            }
            k->badheader = HEADER_NORMAL; /* taken care of now */

            if(result)
              return result;
          }

        } /* if (! header and data to read ) */
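        /* Of the encodings recognized in the switch above, only "identity"
           (no transformation) and "deflate" (handled by
           Curl_unencode_deflate_write) are actually decoded here; "gzip" and
           "compress" fall through to the failf() path and yield
           CURLE_BAD_CONTENT_ENCODING. */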
    } /* if( read from socket ) */

    /* If we still have writing to do, we check if we have a writable
       socket. Sometimes the writefdp is NULL, if no fd_set was done using
       the multi interface and then we can do nothing but to attempt a
       write to be sure. */
    if((k->keepon & KEEP_WRITE) &&
       (FD_ISSET(conn->writesockfd, writefdp)) ) {
      /* write */

      ssize_t bytes_written;
      bool writedone=FALSE;

      if ((k->bytecount == 0) && (k->writebytecount == 0))
        Curl_pgrsTime(data, TIMER_STARTTRANSFER);

      didwhat |= KEEP_WRITE;

      /*
       * We loop here to do the READ and SEND loop until we run out of
       * data to send or until we get EWOULDBLOCK back
       */
      do {

        /* only read more data if there's no upload data already
           present in the upload buffer */
        if(0 == conn->upload_present) {
          /* init the "upload from here" pointer */
          conn->upload_fromhere = k->uploadbuf;

          if(!k->upload_done)
            nread = fillbuffer(conn, BUFSIZE);
          else
            nread = 0; /* we're done uploading/reading */

          /* the signed int typecast of nread is for systems that use an
             unsigned size type */
          if(nread <= 0) {
            k->keepon &= ~KEEP_WRITE; /* we're done writing */
            FD_ZERO(&k->wkeepfd);
          }
          /* store number of bytes available for upload */
          conn->upload_present = nread;

          /* convert LF to CRLF if so asked */
          if (data->set.crlf) {
            for(i = 0, si = 0; i < nread; i++, si++) {
              if (conn->upload_fromhere[i] == 0x0a) {
                data->state.scratch[si++] = 0x0d;
                data->state.scratch[si] = 0x0a;
              }
              else
                data->state.scratch[si] = conn->upload_fromhere[i];
            }

            /* only perform the special operation if we really did replace
               anything */

            /* upload from the new (replaced) buffer instead */
            conn->upload_fromhere = data->state.scratch;

            /* set the new amount too */
            conn->upload_present = nread;
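            /* Worked example (hypothetical 3-byte input, not real traffic):
               with CURLOPT_CRLF set, uploading "a\nb" becomes "a\r\nb", one
               extra byte per LF, so in the worst case (every byte an LF) the
               scratch buffer must be able to hold twice the input size. */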
        /* We have a partial buffer left from a previous "round". Use
           that instead of reading more data */

        /* write to socket (send away data) */
        result = Curl_write(conn,
                            conn->writesockfd,     /* socket to send to */
                            conn->upload_fromhere, /* buffer pointer */
                            conn->upload_present,  /* buffer size */
                            &bytes_written);       /* actually send away */
        if(result)
          return result;
        else if(conn->upload_present != bytes_written) {
          /* we only wrote a part of the buffer (if anything), deal with it! */

          /* store the amount of bytes left in the buffer to write */
          conn->upload_present -= bytes_written;

          /* advance the pointer where to find the buffer when the next send
             is done */
          conn->upload_fromhere += bytes_written;
          writedone = TRUE; /* we are done, stop the loop */
        }
        else {
          /* we've uploaded that buffer now */
          conn->upload_fromhere = k->uploadbuf;
          conn->upload_present = 0; /* no more bytes left */

          if(k->upload_done) {
            /* switch off writing, we're done! */
            k->keepon &= ~KEEP_WRITE; /* we're done writing */
            FD_ZERO(&k->wkeepfd);
          }
        }

        if(data->set.verbose)
          Curl_debug(data, CURLINFO_DATA_OUT, conn->upload_fromhere,
                     bytes_written);

        k->writebytecount += bytes_written;
        Curl_pgrsSetUploadCounter(data, (double)k->writebytecount);

      } while(!writedone); /* loop until we're done writing! */

    } /* if(writable socket) */

  } while(0); /* just to break out from! */
  /* Update read/write counters */
  if(conn->bytecountp)
    *conn->bytecountp = k->bytecount; /* read count */
  if(conn->writebytecountp)
    *conn->writebytecountp = k->writebytecount; /* write count */

  if(!didwhat) {
    /* no read no write, this is a timeout? */
    if (k->write_after_100_header) {
      /* This should allow some time for the header to arrive, but only a
         very short time as otherwise it'll be too much wasted time too
         easily. */
      k->write_after_100_header = FALSE;
      FD_SET (conn->writesockfd, &k->writefd); /* write socket */
      k->keepon |= KEEP_WRITE;
      k->wkeepfd = k->writefd;
    }
  }

  k->now = Curl_tvnow();
  if(Curl_pgrsUpdate(conn))
    result = CURLE_ABORTED_BY_CALLBACK;
  else
    result = Curl_speedcheck (data, k->now);

  if (data->set.timeout &&
      ((Curl_tvdiff(k->now, k->start)/1000) >= data->set.timeout)) {
    failf (data, "Operation timed out with %d out of %d bytes received",
           k->bytecount, conn->size);
    return CURLE_OPERATION_TIMEOUTED;
  }
  /*
   * The transfer has been performed. Just make some general checks before
   * returning.
   */

  if(!(data->set.no_body) && k->contentlength &&
     (k->bytecount != k->contentlength) &&
     !conn->newurl) {
    failf(data, "transfer closed with %d bytes remaining to read",
          k->contentlength-k->bytecount);
    return CURLE_PARTIAL_FILE;
  }
  else if(conn->bits.chunk && conn->proto.http->chunk.datasize) {
    failf(data, "transfer closed with at least %d bytes remaining",
          conn->proto.http->chunk.datasize);
    return CURLE_PARTIAL_FILE;
  }
  if(Curl_pgrsUpdate(conn))
    return CURLE_ABORTED_BY_CALLBACK;

  /* Now update the "done" boolean we return */
CURLcode Curl_readwrite_init(struct connectdata *conn)
{
  struct SessionHandle *data = conn->data;
  struct Curl_transfer_keeper *k = &conn->keep;

  /* NB: the content encoding software depends on this initialization of
     Curl_transfer_keeper. 08/28/02 jhrg */
  memset(k, 0, sizeof(struct Curl_transfer_keeper));

  k->start = Curl_tvnow(); /* start time */
  k->now = k->start;       /* current time is now */
  k->header = TRUE;        /* assume header */
  k->httpversion = -1;     /* unknown at this point */

  data = conn->data; /* there's the root struct */
  k->buf = data->state.buffer;
  k->uploadbuf = data->state.uploadbuffer;
  k->maxfd = (conn->sockfd>conn->writesockfd?
              conn->sockfd:conn->writesockfd)+1;
  k->hbufp = data->state.headerbuff;

  Curl_pgrsTime(data, TIMER_PRETRANSFER);
  Curl_speedinit(data);

  Curl_pgrsSetUploadCounter(data, 0);
  Curl_pgrsSetDownloadCounter(data, 0);
  if (!conn->bits.getheader) {
    Curl_pgrsSetDownloadSize(data, conn->size);
  }

  /* we want header and/or body, if neither then don't do this! */
  if(conn->bits.getheader || !data->set.no_body) {

    FD_ZERO (&k->readfd);               /* clear it */
    if(conn->sockfd != -1) {
      FD_SET (conn->sockfd, &k->readfd); /* read socket */
      k->keepon |= KEEP_READ;
    }

    FD_ZERO (&k->writefd);              /* clear it */
    if(conn->writesockfd != -1) {
      if (data->set.expect100header)
        /* wait with write until we either got 100-continue or a timeout */
        k->write_after_100_header = TRUE;
      else {
        FD_SET (conn->writesockfd, &k->writefd); /* write socket */
        k->keepon |= KEEP_WRITE;
      }
    }

    /* get these in backup variables to be able to restore them on each lap in
       the select() loop */
    k->rkeepfd = k->readfd;
    k->wkeepfd = k->writefd;
  }
void Curl_single_fdset(struct connectdata *conn,
                       fd_set *read_fd_set,
                       fd_set *write_fd_set,
                       fd_set *exc_fd_set,
                       int *max_fd)
{
  *max_fd = -1; /* init */
  if(conn->keep.keepon & KEEP_READ) {
    FD_SET(conn->sockfd, read_fd_set);
    *max_fd = conn->sockfd;
    conn->keep.readfdp = read_fd_set; /* store the address of the set */
  }
  if(conn->keep.keepon & KEEP_WRITE) {
    FD_SET(conn->writesockfd, write_fd_set);
    if(conn->writesockfd > *max_fd)
      *max_fd = conn->writesockfd;
    conn->keep.writefdp = write_fd_set; /* store the address of the set */
  }
  /* we don't use exceptions, only touch that one to prevent compiler
     warnings! */
  *exc_fd_set = *exc_fd_set;
}
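/* The readfdp/writefdp pointers stored above are the same ones that
   Curl_readwrite() tests with FD_ISSET(); if they were never set, for
   instance when curl_multi_perform() is used without curl_multi_fdset(),
   Curl_readwrite() falls back to its own zero-timeout select() probe. */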
/*
 * This function is what performs the actual transfer. It is capable of
 * doing both ways simultaneously.
 * The transfer must already have been setup by a call to Curl_Transfer().
 *
 * Note that headers are created in a preallocated buffer of a default size.
 * That buffer can be enlarged on demand, but it is never shrunk again.
 *
 * Parts of this function were once written by the friendly Mark Butler
 * <butlerm@xmission.com>.
 */
static CURLcode
Transfer(struct connectdata *conn)
{
  CURLcode result = CURLE_OK;
  struct SessionHandle *data = conn->data;
  struct Curl_transfer_keeper *k = &conn->keep;
  bool done = FALSE;

  Curl_readwrite_init(conn);

  if((conn->sockfd == -1) && (conn->writesockfd == -1))
    /* nothing to read, nothing to write, we're already OK! */
    return CURLE_OK;

  /* we want header and/or body, if neither then don't do this! */
  if(!conn->bits.getheader && data->set.no_body)
    return CURLE_OK;

  k->writefdp = &k->writefd; /* store the address of the set */
  k->readfdp = &k->readfd;   /* store the address of the set */

  while(!done) {
    struct timeval interval;
    k->readfd = k->rkeepfd;  /* set these every lap in the loop */
    k->writefd = k->wkeepfd;
    interval.tv_sec = 1;
    interval.tv_usec = 0;

    switch (select (k->maxfd, k->readfdp, k->writefdp, NULL, &interval)) {
    case -1: /* select() error, stop reading */
      /* The EINTR is not serious, and it seems you might get this more
         often when using the lib in a multi-threaded environment! */
      done = TRUE; /* no more read or write */
      break;
    case 0:  /* timeout */
      result = Curl_readwrite(conn, &done);
      break;
    default: /* readable descriptors */
      result = Curl_readwrite(conn, &done);
      break;
    }

    /* "done" signals to us if the transfer(s) are ready */
  }

  return result;
}
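/* Note on Transfer()'s select() loop: since 'interval' is one second, the
   select() call returns at least once per second even on a completely idle
   connection, which is what keeps the progress meter, Curl_speedcheck() and
   the timeout test inside Curl_readwrite() ticking. */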
CURLcode Curl_pretransfer(struct SessionHandle *data)
{
  if(!data->change.url)
    /* we can't do anything without URL */
    return CURLE_URL_MALFORMAT;

  /* Init the SSL session ID cache here. We do it here since we want to
     do it after the *_setopt() calls (that could change the size) but
     before any transfer. */
  Curl_SSL_InitSessions(data, data->set.ssl.numsessions);

  data->set.followlocation=0; /* reset the location-follow counter */
  data->state.this_is_a_follow = FALSE; /* reset this */
  data->state.errorbuf = FALSE; /* no error has occurred */

  /* If there was a list of cookie files to read and we haven't done it
     before, do it now! */
  if(data->change.cookielist) {
    struct curl_slist *list = data->change.cookielist;
    while(list) {
      data->cookies = Curl_cookie_init(list->data,
                                       data->cookies,
                                       data->set.cookiesession);
      list = list->next;
    }
    curl_slist_free_all(data->change.cookielist); /* clean up list */
    data->change.cookielist = NULL; /* don't do this again! */
  }

  /* Allow data->set.use_port to set which port to use. This needs to be
   * disabled for example when we follow Location: headers to URLs using
   * different ports! */
  data->state.allow_port = TRUE;

#if defined(HAVE_SIGNAL) && defined(SIGPIPE)
  /*************************************************************
   * Tell signal handler to ignore SIGPIPE
   *************************************************************/
  if(!data->set.no_signal)
    data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
#endif

  Curl_initinfo(data); /* reset session-specific information "variables" */
  Curl_pgrsStartNow(data);

  return CURLE_OK;
}
CURLcode Curl_posttransfer(struct SessionHandle *data)
{
#if defined(HAVE_SIGNAL) && defined(SIGPIPE)
  /* restore the signal handler for SIGPIPE before we get back */
  if(!data->set.no_signal)
    signal(SIGPIPE, data->state.prev_signal);
#endif

  return CURLE_OK;
}
CURLcode Curl_follow(struct SessionHandle *data,
                     char *newurl) /* this 'newurl' is the Location: string,
                                      and it must be malloc()ed before passed
                                      here */
{
  /* Location: redirect */
  char prot[16]; /* URL protocol string storage */
  char letter;   /* used for a silly sscanf */

  if (data->set.maxredirs &&
      (data->set.followlocation >= data->set.maxredirs)) {
    failf(data,"Maximum (%d) redirects followed", data->set.maxredirs);
    return CURLE_TOO_MANY_REDIRECTS;
  }

  /* mark the next request as a followed location: */
  data->state.this_is_a_follow = TRUE;

  data->set.followlocation++; /* count location-followers */

  if(data->set.http_auto_referer) {
    /* We are asked to automatically set the previous URL as the
       referer when we get the next URL. We pick the ->url field,
       which may or may not be 100% correct */

    if(data->change.referer_alloc)
      /* If we already have an allocated referer, free this first */
      free(data->change.referer);

    data->change.referer = strdup(data->change.url);
    data->change.referer_alloc = TRUE; /* yes, free this later */
  }
  if(2 != sscanf(newurl, "%15[^?&/:]://%c", prot, &letter)) {
    /***
     *DANG* this is an RFC 2068 violation. The URL is supposed
     to be absolute and this doesn't seem to be that!
     ***
     Instead, we have to TRY to append this new path to the old URL
     to the right of the host part. Oh crap, this is doomed to cause
     problems in the future...
    */
    char *protsep;
    char *pathsep;
    char *newest;
    char *useurl = newurl;

    /* we must make our own copy of the URL to play with, as it may
       point to read-only data */
    char *url_clone=strdup(data->change.url);

    if(!url_clone)
      return CURLE_OUT_OF_MEMORY; /* skip out of this NOW */

    /* protsep points to the start of the host name */
    protsep=strstr(url_clone, "//");
    if(!protsep)
      protsep=url_clone;
    else
      protsep+=2; /* pass the slashes */

    if('/' != newurl[0]) {
      /* First we need to find out if there's a ?-letter in the URL,
         and cut it and the right-side of that off */
      pathsep = strrchr(protsep, '?');
      if(pathsep)
        *pathsep=0;

      /* we have a relative path to append to the last slash if
         there's one available */
      pathsep = strrchr(protsep, '/');
      if(pathsep)
        *pathsep=0;

      /* Check if there's any slash after the host name, and if so,
         remember that position instead */
      pathsep = strchr(protsep, '/');
      if(pathsep)
        protsep = pathsep+1;

      /* now deal with one "./" or any amount of "../" in the newurl
         and act accordingly */

      if((useurl[0] == '.') && (useurl[1] == '/'))
        useurl+=2; /* just skip the "./" */

      while((useurl[0] == '.') &&
            (useurl[1] == '.') &&
            (useurl[2] == '/')) {
        useurl+=3; /* pass the "../" */

        /* cut off one more level from the right of the original URL */
        pathsep = strrchr(protsep, '/');
        if(pathsep)
          *pathsep=0;
      }
    }
    else {
      /* We got a new absolute path for this server, cut off from the
         first slash */
      pathsep = strchr(protsep, '/');
      if(pathsep)
        *pathsep=0;
    }

    newest=(char *)malloc( strlen(url_clone) +
                           1 + /* possible slash */
                           strlen(useurl) + 1/* zero byte */);
    if(!newest)
      return CURLE_OUT_OF_MEMORY; /* go out from this */

    sprintf(newest, "%s%s%s", url_clone,
            (('/' == useurl[0]) || (protsep && !*protsep))?"":"/",
            useurl);
    free(newurl); /* newurl is the allocated pointer */
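    /* Worked example with made-up values: if data->change.url is
       "http://example.com/dir/page.html" and the Location: value is
       "another.html", the code above cuts "page.html" off the clone and
       glues the pieces back together as
       "http://example.com/dir/another.html". */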
  }
  else {
    /* This is an absolute URL, don't allow the custom port number */
    data->state.allow_port = FALSE;

    if(data->change.url_alloc)
      free(data->change.url);
    else
      data->change.url_alloc = TRUE; /* the URL is allocated */

    /* TBD: set the URL with curl_setopt() */
    data->change.url = newurl;
    newurl = NULL; /* don't free! */
  }

  infof(data, "Follows Location: to new URL: '%s'\n", data->change.url);
  /*
   * We get here when the HTTP code is 300-399. We need to perform
   * differently based on exactly what return code there was.
   * Discussed on the curl mailing list and posted about on the 26th.
   */
  switch(data->info.httpcode) {
  case 300: /* Multiple Choices */
  case 306: /* Not used */
  case 307: /* Temporary Redirect */
  default:  /* for all unknown ones */
    /* These are explicitly mentioned since I've checked RFC2616 and they
     * seem to be OK to POST to.
     */
    break;
  case 301: /* Moved Permanently */
    /* (quote from RFC2616, section 10.3.2):
     *
     * Note: When automatically redirecting a POST request after
     * receiving a 301 status code, some existing HTTP/1.0 user agents
     * will erroneously change it into a GET request.
     *
     * Warning: Because most of the important user agents do this clear
     * RFC2616 violation, many webservers expect this misbehavior. So
     * these servers often answer a POST request with an error page.
     * To be sure that libcurl gets the page that most user agents
     * would get, libcurl has to force GET:
     */
    if( data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM) {
      infof(data,
            "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;
  case 302: /* Found */
    /*
     * Note: RFC 1945 and RFC 2068 specify that the client is not allowed
     * to change the method on the redirected request. However, most
     * existing user agent implementations treat 302 as if it were a 303
     * response, performing a GET on the Location field-value regardless
     * of the original request method. The status codes 303 and 307 have
     * been added for servers that wish to make unambiguously clear which
     * kind of reaction is expected of the client.
     *
     * Note: Many pre-HTTP/1.1 user agents do not understand the 303
     * status. When interoperability with such clients is a concern, the
     * 302 status code may be used instead, since most user agents react
     * to a 302 response as described here for 303.
     */
  case 303: /* See Other */
    /* Disable both types of POSTs, since doing a second POST when
     * following isn't what anyone would want! */
    if(data->set.httpreq != HTTPREQ_GET) {
      data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
      infof(data, "Disables POST, goes with %s\n",
            data->set.no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy. The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }

  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTimes(data);

  return CURLE_OK;
}
CURLcode Curl_perform(struct SessionHandle *data)
{
  CURLcode res;
  CURLcode res2;
  struct connectdata *conn=NULL;
  char *newurl = NULL; /* possibly a new URL to follow to! */

  data->state.used_interface = Curl_if_easy;

  res = Curl_pretransfer(data);
  if(res)
    return res;

  /*
   * It is important that there is NO 'return' from this function at any other
   * place than falling down to the end of the function! This is because we
   * have cleanup stuff that must be done before we get back, and that is only
   * performed after this do-while loop.
   */

  do {
    Curl_pgrsTime(data, TIMER_STARTSINGLE);
    res = Curl_connect(data, &conn);
    if(res == CURLE_OK) {
      res = Curl_do(&conn);

      if(res == CURLE_OK) {
        CURLcode res2; /* just a local extra result container */

        if(conn->protocol&PROT_FTPS)
          /* FTPS, disable ssl while transferring data */
          conn->ssl.use = FALSE;
        res = Transfer(conn); /* now fetch that URL please */
        if(conn->protocol&PROT_FTPS)
          /* FTPS, enable ssl again after having transferred data */
          conn->ssl.use = TRUE;

        if(res == CURLE_OK) {
          /*
           * We must duplicate the new URL here as the connection data
           * may be free()ed in the Curl_done() function.
           */
          newurl = conn->newurl?strdup(conn->newurl):NULL;
        }
        else {
          /* The transfer phase returned error, we mark the connection to get
           * closed to prevent being re-used. This is because we can't
           * possibly know if the connection is in a good shape or not now. */
          conn->bits.close = TRUE;

          if(-1 != conn->secondarysocket) {
            /* if we failed anywhere, we must clean up the secondary socket if
               it was used */
            sclose(conn->secondarysocket);
            conn->secondarysocket=-1;
          }
        }

        /* Always run Curl_done(), even if some of the previous calls
           failed, but return the previous (original) error code */
        res2 = Curl_done(conn);
      }

      /*
       * Important: 'conn' cannot be used here, since it may have been closed
       * in 'Curl_done' or other functions.
       */

      if((res == CURLE_OK) && newurl) {
        res = Curl_follow(data, newurl);
        if(CURLE_OK == res) {
          newurl = NULL;
          continue;
        }
      }
    }
    break; /* it only reaches here when this shouldn't loop */

  } while(1); /* loop if Location: */

  if(newurl)
    free(newurl);

  /* run post-transfer unconditionally, but don't clobber the return code if
     we already have an error code recorded */
  res2 = Curl_posttransfer(data);

  return res;
}
CURLcode
Curl_Transfer(struct connectdata *c_conn, /* connection data */
              int sockfd,        /* socket to read from or -1 */
              int size,          /* -1 if unknown at this point */
              bool getheader,    /* TRUE if header parsing is wanted */
              long *bytecountp,  /* return number of bytes read or NULL */
              int writesockfd,   /* socket to write to, it may very well be
                                    the same we read from. -1 disables */
              long *writebytecountp /* return number of bytes written or
                                       NULL */
              )
{
  struct connectdata *conn = (struct connectdata *)c_conn;
  if(!conn)
    return CURLE_BAD_FUNCTION_ARGUMENT;

  /* now copy all input parameters */
  conn->sockfd = sockfd;
  conn->size = size;
  conn->bits.getheader = getheader;
  conn->bytecountp = bytecountp;
  conn->writesockfd = writesockfd;
  conn->writebytecountp = writebytecountp;

  return CURLE_OK;
}

/*
 * eval: (load-file "../curl-mode.el")
 * vim600: fdm=marker
 * vim: et sw=2 ts=2 sts=2 tw=78
 */