/*
   Unix SMB/CIFS implementation.
   Copyright (C) Volker Lendecke 2008

   ** NOTE! The following LGPL license applies to the async_sock
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/network.h"
#include "system/filesys.h"
#include <talloc.h>
#include <tevent.h>
#include "lib/async_req/async_sock.h"
#include "lib/util/iov_buf.h"
#include "lib/util/util_net.h"

/* Note: lib/util/ is currently GPL */
#include "lib/util/tevent_unix.h"
#include "lib/util/samba_util.h"

struct async_connect_state {
        int fd;
        struct tevent_fd *fde;
        int result;
        long old_sockflags;
        socklen_t address_len;
        struct sockaddr_storage address;

        void (*before_connect)(void *private_data);
        void (*after_connect)(void *private_data);
        void *private_data;
};

static void async_connect_cleanup(struct tevent_req *req,
                                  enum tevent_req_state req_state);
static void async_connect_connected(struct tevent_context *ev,
                                    struct tevent_fd *fde, uint16_t flags,
                                    void *priv);

/**
 * @brief async version of connect(2)
 * @param[in] mem_ctx The memory context to hang the result off
 * @param[in] ev The event context to work from
 * @param[in] fd The socket to connect with
 * @param[in] address Where to connect?
 * @param[in] address_len Length of *address
 * @retval The async request
 *
 * This function sets the socket into non-blocking state to be able to call
 * connect in an async state. This will be reset when the request is finished.
 */

struct tevent_req *async_connect_send(
        TALLOC_CTX *mem_ctx, struct tevent_context *ev, int fd,
        const struct sockaddr *address, socklen_t address_len,
        void (*before_connect)(void *private_data),
        void (*after_connect)(void *private_data),
        void *private_data)
{
        struct tevent_req *req;
        struct async_connect_state *state;
        int ret;

        req = tevent_req_create(mem_ctx, &state, struct async_connect_state);
        if (req == NULL) {
                return NULL;
        }

        /*
         * We have to set the socket to nonblocking for async connect(2). Keep
         * the old sockflags around.
         */

        state->fd = fd;
        state->result = -1;
        state->before_connect = before_connect;
        state->after_connect = after_connect;
        state->private_data = private_data;

        state->old_sockflags = fcntl(fd, F_GETFL, 0);
        if (state->old_sockflags == -1) {
                tevent_req_error(req, errno);
                return tevent_req_post(req, ev);
        }

        tevent_req_set_cleanup_fn(req, async_connect_cleanup);

        state->address_len = address_len;
        if (address_len > sizeof(state->address)) {
                tevent_req_error(req, EINVAL);
                return tevent_req_post(req, ev);
        }
        memcpy(&state->address, address, address_len);

        ret = set_blocking(fd, false);
        if (ret == -1) {
                tevent_req_error(req, errno);
                return tevent_req_post(req, ev);
        }

        if (state->before_connect != NULL) {
                state->before_connect(state->private_data);
        }

        state->result = connect(fd, address, address_len);

        if (state->after_connect != NULL) {
                state->after_connect(state->private_data);
        }

        if (state->result == 0) {
                tevent_req_done(req);
                return tevent_req_post(req, ev);
        }

        /*
         * The only errno indicating that an initial connect is still
         * in flight is EINPROGRESS.
         *
         * This allows callers like open_socket_out_send() to reuse
         * fds and call us with an fd for which the connect is still
         * in flight. The proper thing to do for callers would be
         * closing the fd and starting from scratch with a fresh
         * socket.
         */
        if (errno != EINPROGRESS) {
                tevent_req_error(req, errno);
                return tevent_req_post(req, ev);
        }

        state->fde = tevent_add_fd(ev, state, fd,
                                   TEVENT_FD_ERROR|TEVENT_FD_WRITE,
                                   async_connect_connected, req);
        if (state->fde == NULL) {
                tevent_req_error(req, ENOMEM);
                return tevent_req_post(req, ev);
        }
        return req;
}

static void async_connect_cleanup(struct tevent_req *req,
                                  enum tevent_req_state req_state)
{
        struct async_connect_state *state =
                tevent_req_data(req, struct async_connect_state);

        TALLOC_FREE(state->fde);
        if (state->fd != -1) {
                int ret;

                /* Restore the original socket flags. */
                ret = fcntl(state->fd, F_SETFL, state->old_sockflags);
                if (ret == -1) {
                        abort();
                }

                state->fd = -1;
        }
}

/**
 * fde event handler for connect(2)
 * @param[in] ev The event context that sent us here
 * @param[in] fde The file descriptor event associated with the connect
 * @param[in] flags Indicate read/writeability of the socket
 * @param[in] priv private data, "struct tevent_req *" in this case
 */

static void async_connect_connected(struct tevent_context *ev,
                                    struct tevent_fd *fde, uint16_t flags,
                                    void *priv)
{
        struct tevent_req *req = talloc_get_type_abort(
                priv, struct tevent_req);
        struct async_connect_state *state =
                tevent_req_data(req, struct async_connect_state);
        int ret;
        int socket_error = 0;
        socklen_t slen = sizeof(socket_error);

        ret = getsockopt(state->fd, SOL_SOCKET, SO_ERROR,
                         &socket_error, &slen);
        if (ret == -1) {
                /*
                 * According to Stevens this is the Solaris behaviour
                 * in case the connection encountered an error:
                 * getsockopt() fails, error is in errno
                 */
                tevent_req_error(req, errno);
                return;
        }

        if (socket_error != 0) {
                /*
                 * Berkeley derived implementations (including Linux)
                 * return the pending error via socket_error.
                 */
                tevent_req_error(req, socket_error);
                return;
        }

        tevent_req_done(req);
}

int async_connect_recv(struct tevent_req *req, int *perrno)
{
        int err = tevent_req_simple_recv_unix(req);

        if (err != 0) {
                *perrno = err;
                return -1;
        }
        return 0;
}
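
/*
 * Example (editor's sketch, not part of the original code): a hypothetical
 * caller wiring async_connect_send()/async_connect_recv() into a tevent
 * loop. All "example_" names are illustrative only.
 *
 *      static void example_connect_done(struct tevent_req *req)
 *      {
 *              int err;
 *              int ret = async_connect_recv(req, &err);
 *              TALLOC_FREE(req);
 *              if (ret == -1) {
 *                      fprintf(stderr, "connect: %s\n", strerror(err));
 *                      return;
 *              }
 *              // connected; the cleanup function has restored the
 *              // original fcntl flags on the fd
 *      }
 *
 *      req = async_connect_send(mem_ctx, ev, fd,
 *                               (const struct sockaddr *)&ss, ss_len,
 *                               NULL, NULL, NULL);
 *      if (req == NULL) {
 *              return ENOMEM;
 *      }
 *      tevent_req_set_callback(req, example_connect_done, NULL);
 */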

struct writev_state {
        struct tevent_context *ev;
        struct tevent_queue_entry *queue_entry;
        int fd;
        struct tevent_fd *fde;
        struct iovec *iov;
        int count;
        size_t total_size;
        uint16_t flags;
        bool err_on_readability;
};

static void writev_cleanup(struct tevent_req *req,
                           enum tevent_req_state req_state);
static bool writev_cancel(struct tevent_req *req);
static void writev_trigger(struct tevent_req *req, void *private_data);
static void writev_handler(struct tevent_context *ev, struct tevent_fd *fde,
                           uint16_t flags, void *private_data);

struct tevent_req *writev_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
                               struct tevent_queue *queue, int fd,
                               bool err_on_readability,
                               struct iovec *iov, int count)
{
        struct tevent_req *req;
        struct writev_state *state;

        req = tevent_req_create(mem_ctx, &state, struct writev_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->fd = fd;
        state->total_size = 0;
        state->count = count;
        state->iov = (struct iovec *)talloc_memdup(
                state, iov, sizeof(struct iovec) * count);
        if (tevent_req_nomem(state->iov, req)) {
                return tevent_req_post(req, ev);
        }

        state->flags = TEVENT_FD_WRITE | TEVENT_FD_ERROR;
        if (err_on_readability) {
                state->flags |= TEVENT_FD_READ;
        }

        tevent_req_set_cleanup_fn(req, writev_cleanup);
        tevent_req_set_cancel_fn(req, writev_cancel);

        if (queue == NULL) {
                state->fde = tevent_add_fd(state->ev, state, state->fd,
                                           state->flags, writev_handler, req);
                if (tevent_req_nomem(state->fde, req)) {
                        return tevent_req_post(req, ev);
                }
                return req;
        }

        /*
         * writev_trigger tries a nonblocking write. If that succeeds,
         * we can't directly notify the callback to call
         * writev_recv. The callback would TALLOC_FREE(req) after
         * calling writev_recv even before writev_trigger can inspect
         * it for being successful.
         */
        tevent_req_defer_callback(req, ev);

        state->queue_entry = tevent_queue_add_optimize_empty(
                queue, ev, req, writev_trigger, NULL);
        if (tevent_req_nomem(state->queue_entry, req)) {
                return tevent_req_post(req, ev);
        }
        if (!tevent_req_is_in_progress(req)) {
                return tevent_req_post(req, ev);
        }

        return req;
}

static void writev_cleanup(struct tevent_req *req,
                           enum tevent_req_state req_state)
{
        struct writev_state *state = tevent_req_data(req, struct writev_state);

        TALLOC_FREE(state->queue_entry);
        TALLOC_FREE(state->fde);
}

static bool writev_cancel(struct tevent_req *req)
{
        struct writev_state *state = tevent_req_data(req, struct writev_state);

        if (state->total_size > 0) {
                /*
                 * We've already started to write :-(
                 */
                return false;
        }

        TALLOC_FREE(state->queue_entry);
        TALLOC_FREE(state->fde);

        tevent_req_defer_callback(req, state->ev);
        tevent_req_error(req, ECANCELED);
        return true;
}

static void writev_do(struct tevent_req *req, struct writev_state *state)
{
        ssize_t written;
        bool ok;

        written = writev(state->fd, state->iov, state->count);
        if ((written == -1) &&
            ((errno == EINTR) ||
             (errno == EAGAIN) ||
             (errno == EWOULDBLOCK))) {
                /* retry after going through the tevent loop */
                return;
        }
        if (written == -1) {
                tevent_req_error(req, errno);
                return;
        }
        if (written == 0) {
                tevent_req_error(req, EPIPE);
                return;
        }
        state->total_size += written;

        ok = iov_advance(&state->iov, &state->count, written);
        if (!ok) {
                tevent_req_error(req, EIO);
                return;
        }

        if (state->count == 0) {
                tevent_req_done(req);
                return;
        }
}

static void writev_trigger(struct tevent_req *req, void *private_data)
{
        struct writev_state *state = tevent_req_data(req, struct writev_state);

        state->queue_entry = NULL;

        writev_do(req, state);
        if (!tevent_req_is_in_progress(req)) {
                return;
        }

        state->fde = tevent_add_fd(state->ev, state, state->fd, state->flags,
                                   writev_handler, req);
        if (tevent_req_nomem(state->fde, req)) {
                return;
        }
}

static void writev_handler(struct tevent_context *ev, struct tevent_fd *fde,
                           uint16_t flags, void *private_data)
{
        struct tevent_req *req = talloc_get_type_abort(
                private_data, struct tevent_req);
        struct writev_state *state =
                tevent_req_data(req, struct writev_state);

        if (flags & TEVENT_FD_ERROR) {
                /*
                 * There's an error, for legacy reasons
                 * we just use EPIPE instead of a more
                 * detailed error using
                 * samba_socket_poll_or_sock_error().
                 */
                tevent_req_error(req, EPIPE);
                return;
        }

        if (flags & TEVENT_FD_READ) {
                /* Readable and the caller wants an error on read. */
                tevent_req_error(req, EPIPE);
                return;
        }

        writev_do(req, state);
}

ssize_t writev_recv(struct tevent_req *req, int *perrno)
{
        struct writev_state *state =
                tevent_req_data(req, struct writev_state);
        ssize_t ret;

        if (tevent_req_is_unix_error(req, perrno)) {
                tevent_req_received(req);
                return -1;
        }
        ret = state->total_size;
        tevent_req_received(req);
        return ret;
}
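
/*
 * Example (editor's sketch, not part of the original code): a hypothetical
 * caller that queues a header/payload iovec pair on an existing
 * tevent_queue and collects the byte count in the callback. All
 * "example_" names are illustrative.
 *
 *      static void example_write_done(struct tevent_req *req)
 *      {
 *              int err;
 *              ssize_t nwritten = writev_recv(req, &err);
 *              TALLOC_FREE(req);
 *              if (nwritten == -1) {
 *                      fprintf(stderr, "writev: %s\n", strerror(err));
 *              }
 *      }
 *
 *      struct iovec iov[2] = {
 *              { .iov_base = hdr,  .iov_len = hdrlen  },
 *              { .iov_base = body, .iov_len = bodylen },
 *      };
 *      req = writev_send(mem_ctx, ev, send_queue, fd,
 *                        true,         // treat readability as an error
 *                        iov, 2);
 *      if (req == NULL) {
 *              return ENOMEM;
 *      }
 *      tevent_req_set_callback(req, example_write_done, NULL);
 */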

struct read_packet_state {
        int fd;
        struct tevent_fd *fde;
        uint8_t *buf;
        size_t nread;
        ssize_t (*more)(uint8_t *buf, size_t buflen, void *private_data);
        void *private_data;
};

static void read_packet_cleanup(struct tevent_req *req,
                                enum tevent_req_state req_state);
static void read_packet_handler(struct tevent_context *ev,
                                struct tevent_fd *fde,
                                uint16_t flags, void *private_data);

struct tevent_req *read_packet_send(TALLOC_CTX *mem_ctx,
                                    struct tevent_context *ev,
                                    int fd, size_t initial,
                                    ssize_t (*more)(uint8_t *buf,
                                                    size_t buflen,
                                                    void *private_data),
                                    void *private_data)
{
        struct tevent_req *req;
        struct read_packet_state *state;

        req = tevent_req_create(mem_ctx, &state, struct read_packet_state);
        if (req == NULL) {
                return NULL;
        }
        state->fd = fd;
        state->more = more;
        state->private_data = private_data;

        tevent_req_set_cleanup_fn(req, read_packet_cleanup);

        state->buf = talloc_array(state, uint8_t, initial);
        if (tevent_req_nomem(state->buf, req)) {
                return tevent_req_post(req, ev);
        }

        state->fde = tevent_add_fd(ev, state, fd,
                                   TEVENT_FD_READ, read_packet_handler,
                                   req);
        if (tevent_req_nomem(state->fde, req)) {
                return tevent_req_post(req, ev);
        }
        return req;
}

static void read_packet_cleanup(struct tevent_req *req,
                                enum tevent_req_state req_state)
{
        struct read_packet_state *state =
                tevent_req_data(req, struct read_packet_state);

        TALLOC_FREE(state->fde);
}

static void read_packet_handler(struct tevent_context *ev,
                                struct tevent_fd *fde,
                                uint16_t flags, void *private_data)
{
        struct tevent_req *req = talloc_get_type_abort(
                private_data, struct tevent_req);
        struct read_packet_state *state =
                tevent_req_data(req, struct read_packet_state);
        size_t total = talloc_get_size(state->buf);
        ssize_t nread, more;
        uint8_t *tmp;

        nread = recv(state->fd, state->buf+state->nread, total-state->nread,
                     0);
        if ((nread == -1) && (errno == ENOTSOCK)) {
                nread = read(state->fd, state->buf+state->nread,
                             total-state->nread);
        }
        if ((nread == -1) && (errno == EINTR)) {
                /* retry */
                return;
        }
        if (nread == -1) {
                tevent_req_error(req, errno);
                return;
        }
        if (nread == 0) {
                tevent_req_error(req, EPIPE);
                return;
        }

        state->nread += nread;
        if (state->nread < total) {
                /* Come back later */
                return;
        }

        /*
         * We got what was initially requested. See if "more" asks for -- more.
         */
        if (state->more == NULL) {
                /* Nobody to ask, this is an async read_data */
                tevent_req_done(req);
                return;
        }

        more = state->more(state->buf, total, state->private_data);
        if (more == -1) {
                /* We got an invalid packet, tell the caller */
                tevent_req_error(req, EIO);
                return;
        }
        if (more == 0) {
                /* We're done, full packet received */
                tevent_req_done(req);
                return;
        }

        if (total + more < total) {
                tevent_req_error(req, EMSGSIZE);
                return;
        }

        tmp = talloc_realloc(state, state->buf, uint8_t, total+more);
        if (tevent_req_nomem(tmp, req)) {
                return;
        }
        state->buf = tmp;
}

ssize_t read_packet_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
                         uint8_t **pbuf, int *perrno)
{
        struct read_packet_state *state =
                tevent_req_data(req, struct read_packet_state);

        if (tevent_req_is_unix_error(req, perrno)) {
                tevent_req_received(req);
                return -1;
        }
        *pbuf = talloc_move(mem_ctx, &state->buf);
        tevent_req_received(req);
        return talloc_get_size(*pbuf);
}
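
/*
 * Example (editor's sketch, not part of the original code): a hypothetical
 * "more" callback for a wire format carrying a 4-byte big-endian length
 * prefix. read_packet_send() first reads the 4 header bytes; the callback
 * then asks for the payload, and returns 0 once everything has arrived.
 *
 *      static ssize_t example_more(uint8_t *buf, size_t buflen,
 *                                  void *private_data)
 *      {
 *              uint32_t payload_len;
 *
 *              if (buflen > 4) {
 *                      return 0;       // header and payload are complete
 *              }
 *              payload_len = ((uint32_t)buf[0] << 24) |
 *                            ((uint32_t)buf[1] << 16) |
 *                            ((uint32_t)buf[2] << 8) |
 *                            ((uint32_t)buf[3]);
 *              return payload_len;     // additional bytes to read
 *      }
 *
 *      req = read_packet_send(mem_ctx, ev, fd, 4, example_more, NULL);
 *      tevent_req_set_callback(req, example_packet_done, NULL);
 *      // in example_packet_done: read_packet_recv(req, mem_ctx, &buf, &err)
 */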

struct wait_for_read_state {
        struct tevent_fd *fde;
        int fd;
        bool check_errors;
};

static void wait_for_read_cleanup(struct tevent_req *req,
                                  enum tevent_req_state req_state);
static void wait_for_read_done(struct tevent_context *ev,
                               struct tevent_fd *fde,
                               uint16_t flags,
                               void *private_data);

struct tevent_req *wait_for_read_send(TALLOC_CTX *mem_ctx,
                                      struct tevent_context *ev, int fd,
                                      bool check_errors)
{
        struct tevent_req *req;
        struct wait_for_read_state *state;

        req = tevent_req_create(mem_ctx, &state, struct wait_for_read_state);
        if (req == NULL) {
                return NULL;
        }

        tevent_req_set_cleanup_fn(req, wait_for_read_cleanup);

        state->fde = tevent_add_fd(ev, state, fd, TEVENT_FD_READ,
                                   wait_for_read_done, req);
        if (tevent_req_nomem(state->fde, req)) {
                return tevent_req_post(req, ev);
        }

        state->fd = fd;
        state->check_errors = check_errors;
        return req;
}

static void wait_for_read_cleanup(struct tevent_req *req,
                                  enum tevent_req_state req_state)
{
        struct wait_for_read_state *state =
                tevent_req_data(req, struct wait_for_read_state);

        TALLOC_FREE(state->fde);
}

static void wait_for_read_done(struct tevent_context *ev,
                               struct tevent_fd *fde,
                               uint16_t flags,
                               void *private_data)
{
        struct tevent_req *req = talloc_get_type_abort(
                private_data, struct tevent_req);
        struct wait_for_read_state *state =
                tevent_req_data(req, struct wait_for_read_state);
        int ret, available;

        if ((flags & TEVENT_FD_READ) == 0) {
                return;
        }

        if (!state->check_errors) {
                tevent_req_done(req);
                return;
        }

        ret = ioctl(state->fd, FIONREAD, &available);

        if ((ret == -1) && (errno == EINTR)) {
                /* come back later */
                return;
        }

        if (ret == -1) {
                tevent_req_error(req, errno);
                return;
        }

        if (available == 0) {
                tevent_req_error(req, EPIPE);
                return;
        }

        tevent_req_done(req);
}

bool wait_for_read_recv(struct tevent_req *req, int *perr)
{
        int err = tevent_req_simple_recv_unix(req);

        if (err != 0) {
                *perr = err;
                return false;
        }

        return true;
}
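
/*
 * Example (editor's sketch, not part of the original code): a hypothetical
 * caller that wants a wakeup once fd becomes readable, with check_errors
 * enabled so that a readable-but-empty socket (EOF) is reported as EPIPE.
 *
 *      static void example_readable(struct tevent_req *req)
 *      {
 *              int err;
 *              bool ok = wait_for_read_recv(req, &err);
 *              TALLOC_FREE(req);
 *              if (!ok) {
 *                      // err is EPIPE on EOF, or the errno from FIONREAD
 *                      return;
 *              }
 *              // data is pending, go read it
 *      }
 *
 *      req = wait_for_read_send(mem_ctx, ev, fd, true);
 *      if (req == NULL) {
 *              return ENOMEM;
 *      }
 *      tevent_req_set_callback(req, example_readable, NULL);
 */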

struct accept_state {
        struct tevent_fd *fde;
        int listen_sock;
        struct samba_sockaddr addr;
        int sock;
};

static void accept_handler(struct tevent_context *ev, struct tevent_fd *fde,
                           uint16_t flags, void *private_data);

struct tevent_req *accept_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
                               int listen_sock)
{
        struct tevent_req *req;
        struct accept_state *state;

        req = tevent_req_create(mem_ctx, &state, struct accept_state);
        if (req == NULL) {
                return NULL;
        }
        state->listen_sock = listen_sock;

        state->fde = tevent_add_fd(ev, state, listen_sock, TEVENT_FD_READ,
                                   accept_handler, req);
        if (tevent_req_nomem(state->fde, req)) {
                return tevent_req_post(req, ev);
        }
        return req;
}

static void accept_handler(struct tevent_context *ev, struct tevent_fd *fde,
                           uint16_t flags, void *private_data)
{
        struct tevent_req *req = talloc_get_type_abort(
                private_data, struct tevent_req);
        struct accept_state *state = tevent_req_data(req, struct accept_state);
        int ret;

        TALLOC_FREE(state->fde);

        if ((flags & TEVENT_FD_READ) == 0) {
                tevent_req_error(req, EIO);
                return;
        }

        state->addr.sa_socklen = sizeof(state->addr.u);

        ret = accept(state->listen_sock,
                     &state->addr.u.sa,
                     &state->addr.sa_socklen);
        if ((ret == -1) && (errno == EINTR)) {
                /* retry */
                return;
        }
        if (ret == -1) {
                tevent_req_error(req, errno);
                return;
        }
        smb_set_close_on_exec(ret);
        state->sock = ret;
        tevent_req_done(req);
}

int accept_recv(struct tevent_req *req,
                int *listen_sock,
                struct samba_sockaddr *paddr,
                int *perr)
{
        struct accept_state *state = tevent_req_data(req, struct accept_state);
        int sock = state->sock;
        int err;

        if (tevent_req_is_unix_error(req, &err)) {
                if (perr != NULL) {
                        *perr = err;
                }
                tevent_req_received(req);
                return -1;
        }
        if (listen_sock != NULL) {
                *listen_sock = state->listen_sock;
        }
        if (paddr != NULL) {
                *paddr = state->addr;
        }
        tevent_req_received(req);
        return sock;
}
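
/*
 * Example (editor's sketch, not part of the original code): a hypothetical
 * accept loop body. accept_recv() returns the connected socket; on success
 * the caller typically re-arms accept_send() on the listening socket it
 * hands back. mem_ctx and ev would come from the caller's own state.
 *
 *      static void example_accept_done(struct tevent_req *req)
 *      {
 *              struct samba_sockaddr peer;
 *              int listen_sock;
 *              int err;
 *              int sock;
 *
 *              sock = accept_recv(req, &listen_sock, &peer, &err);
 *              TALLOC_FREE(req);
 *              if (sock == -1) {
 *                      fprintf(stderr, "accept: %s\n", strerror(err));
 *                      return;
 *              }
 *              // hand "sock" over to a client handler, then re-arm:
 *              req = accept_send(mem_ctx, ev, listen_sock);
 *              if (req != NULL) {
 *                      tevent_req_set_callback(req, example_accept_done,
 *                                              NULL);
 *              }
 *      }
 */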