/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
#define CONFIG_IUCV_SOCK_DEBUG 1

#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;
static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};
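/*
 * Illustrative userspace usage (example values, not part of this file):
 * an AF_IUCV peer is addressed by a blank-padded z/VM user id plus an
 * 8-byte application name carried in struct sockaddr_iucv, roughly:
 *
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	struct sockaddr_iucv sa = { .siucv_family = AF_IUCV };
 *	memcpy(sa.siucv_user_id, "OTHERGST", 8);
 *	memcpy(sa.siucv_name,    "SRVNAME ", 8);
 *	connect(fd, (struct sockaddr *) &sa, sizeof(sa));
 *
 * "OTHERGST" and "SRVNAME " are made-up names; only SOCK_STREAM is
 * supported, see iucv_sock_create() below.
 */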
/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
	.lock = RW_LOCK_UNLOCKED,
	.autobind_name = ATOMIC_INIT(0)
};
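/*
 * Handler registered with the IUCV base layer (see iucv_register() in
 * afiucv_init()).  The base support invokes these callbacks when a peer
 * requests, completes or severs a path, and when a message arrives or a
 * sent message completes.
 */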
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone
};
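/*
 * high_nmcpy() fills the first eight bytes of the 16-byte IUCV user data,
 * low_nmcpy() the last eight.  The destination/source name pair built this
 * way is converted to EBCDIC with ASCEBC() before it is passed to the IUCV
 * path primitives.
 */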
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}
static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
static void iucv_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = iucv_sock_timeout;
	sk->sk_timer.data = (unsigned long)sk;
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}
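/*
 * Note: sk->sk_allocation is set to GFP_DMA below, presumably because the
 * IUCV transport requires data buffers in 31-bit addressable storage; skbs
 * charged to this socket are therefore taken from the DMA zone on s390.
 */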
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_init_timer(sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}
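/*
 * Binding ties the socket to an 8-byte application name that must be unique
 * among AF_IUCV sockets on this guest.  The local half of the address
 * (src_user_id) always comes from the z/VM user id queried at module load
 * time, never from the caller.
 */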
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
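/*
 * If connect() is issued on an unbound socket, a source name is generated
 * from the global autobind counter ("%08x" rendering, retried while the
 * name is already taken) and the user id is obtained via CP QUERY USERID.
 */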
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
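/*
 * Connecting allocates an IUCV path to the peer guest.  The 16 bytes of
 * user data passed along combine the peer's application name (high half)
 * and our own name (low half), converted to EBCDIC; the peer's
 * path_pending callback relies on the same layout to find the listening
 * socket.
 */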
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	iucv = iucv_sk(sk);

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		err = -ECONNREFUSED;
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		release_sock(sk);
		return -ECONNREFUSED;
	}

done:
	release_sock(sk);
	return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3)
				printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
					       rskb);
				break;
			}
			rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		}
	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
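/*
 * Shutdown of the send direction is signalled to the peer with an 8-byte
 * message carried in the parameter list (IUCV_IPRMDATA, prmmsg below);
 * shutting down the receive direction quiesces the path and drops any
 * data still queued on the receive queue.
 */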
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	return err;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
			     struct sk_buff_head *fragmented_skb_q)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(fragmented_skb_q, nskb);
	}

	return 0;
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb, *fskb;
	struct sk_buff_head fragmented_skb_q;
	int rc;

	skb_queue_head_init(&fragmented_skb_q);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb) {
		iucv_path_sever(path, NULL);
		return;
	}

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length,
					       &fragmented_skb_q);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}

	/* Queue the fragmented skb */
	fskb = skb_dequeue(&fragmented_skb_q);
	while (fskb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv->backlog_skb_q, fskb);
		else if (sock_queue_rcv_skb(sk, fskb))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
		fskb = skb_dequeue(&fragmented_skb_q);
	}

	/* Queue the original skb if it exists (was not fragmented) */
	if (skb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
		else if (sock_queue_rcv_skb(sk, skb))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	}
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (list_skb) {
		spin_lock_irqsave(&list->lock, flags);

		do {
			this = list_skb;
			list_skb = list_skb->next;
		} while (memcmp(&msg->tag, this->cb, 4) && list_skb);

		spin_unlock_irqrestore(&list->lock, flags);

		skb_unlink(this, &iucv_sk(sk)->send_skb_q);
		kfree_skb(this);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}
static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};
static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
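/*
 * Module initialization order: register the callback handler with the IUCV
 * base layer, register the protocol with the socket layer, then register
 * the PF_IUCV address family; afiucv_exit() tears this down in reverse.
 */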
static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		printk(KERN_ERR "AF_IUCV needs the VM userid\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);

	printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);