/*
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@redhat.com>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff    :       Unconnected accept() fix.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/checksum.h>

#include <net/tcp_states.h>
/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
/*
 * Wait for a packet..
 */
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk->sk_sleep, &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
/**
 *	skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@noblock: blocking operation?
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
				  int noblock, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, noblock);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		if (flags & MSG_PEEK) {
			unsigned long cpu_flags;

			spin_lock_irqsave(&sk->sk_receive_queue.lock,
					  cpu_flags);
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				atomic_inc(&skb->users);
			spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
					       cpu_flags);
		} else
			skb = skb_dequeue(&sk->sk_receive_queue);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
}
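/*
 * Illustrative sketch, not part of the original file: a minimal
 * recvmsg() in the style of the protocols named in the header,
 * showing the intended pairing of skb_recv_datagram() and
 * skb_free_datagram(). The name sketch_recvmsg is hypothetical;
 * real callers (e.g. udp_recvmsg) follow the same shape.
 */
static int sketch_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	/* Blocks (subject to SO_RCVTIMEO) unless MSG_DONTWAIT is set. */
	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Truncate rather than fail when the user buffer is too small. */
	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	/* Always release the reference skb_recv_datagram() handed us. */
	skb_free_datagram(sk, skb);
	return err ? err : copied;
}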
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_datagram_iovec(list,
							    offset - start,
							    to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
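/*
 * Illustrative sketch, not part of the original file: because the
 * iovec cursor advances as data is copied (see the note above), a
 * caller can peel off a header and then the payload with two calls.
 * The name sketch_copy_split and the 8-byte header are hypothetical;
 * the caller is assumed to have checked that iov has room for
 * skb->len bytes.
 */
static int sketch_copy_split(struct sk_buff *skb, struct iovec *iov)
{
	int err;

	/* Header bytes live at offset 0 of the skb. */
	err = skb_copy_datagram_iovec(skb, 0, iov, 8);
	if (err)
		return err;

	/* Resume at skb offset 8; iov already points past the header. */
	return skb_copy_datagram_iovec(skb, 8, iov, skb->len - 8);
}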
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      unsigned int *csump)
{
	int start = skb_headlen(skb);
	int pos = 0;
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			int err = 0;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
							frag->page_offset +
							offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list=list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				unsigned int csum2 = 0;
				if (copy > len)
					copy = len;
				if (skb_copy_and_csum_datagram(list,
							       offset - start,
							       to, copy,
							       &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	unsigned int csum;
	int chunk = skb->len - hlen;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk + hlen,
							   skb->csum)))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if ((unsigned short)csum_fold(csum))
			goto csum_error;
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
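/*
 * Illustrative sketch, not part of the original file: how a UDP-style
 * receive path picks between the plain and the checksumming copy. The
 * name sketch_copy_checked is hypothetical; skb->csum is assumed to
 * have been seeded with the pseudo-header checksum, as UDP does, and
 * 8 stands in for sizeof(struct udphdr).
 */
static int sketch_copy_checked(struct sk_buff *skb, struct msghdr *msg,
			       int copied)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* Hardware already verified the checksum: plain copy. */
		return skb_copy_datagram_iovec(skb, 8, msg->msg_iov,
					       copied);

	/* Verify while copying; -EINVAL signals a corrupt packet. */
	return skb_copy_and_csum_datagram_iovec(skb, 8, msg->msg_iov);
}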
/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
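/*
 * Illustrative sketch, not part of the original file: datagram_poll()
 * is designed to be plugged straight into a protocol's proto_ops
 * table, as UDP and the other protocols named in the header do. The
 * name sketch_dgram_ops is hypothetical and all other ops are elided.
 */
static struct proto_ops sketch_dgram_ops = {
	.family	= PF_INET,
	.poll	= datagram_poll,
	/* ... remaining ops ... */
};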
EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
EXPORT_SYMBOL(skb_free_datagram);
EXPORT_SYMBOL(skb_recv_datagram);