/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
 *  applies to SOCK_STREAM sockets only
 *  offers an alternative communication option for TCP-protocol sockets
 *  applicable with RoCE-cards only
 *
 *  Initial restrictions:
 *    - non-blocking connect postponed
 *    - IPv6 support postponed
 *    - support for alternate links postponed
 *    - partial support for non-blocking sockets only
 *    - support for urgent data postponed
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *              based on prototype from Frank Blaschka
 */
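
/* Illustrative userspace sketch (not part of this file, shown only for
 * orientation): an SMC socket is used exactly like a TCP socket; only the
 * address family differs, and the connection transparently falls back to
 * TCP when the RDMA handshake cannot be completed. AF_SMC is assumed to be
 * available in the installed headers.
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, 0);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin)); // AF_INET address
 *	write(fd, buf, buflen);                            // as with TCP
 */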
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/inetdevice.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
static DEFINE_MUTEX(smc_create_lgr_pending);	/* serialize link group
						 * creation
						 */

struct smc_lgr_list smc_lgr_list = {		/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
};
static void smc_tcp_listen_work(struct work_struct *);
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);
void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	smc = smc_sk(sk);
	sock_hold(sk);
	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (smc->use_fallback) {
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
	} else {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		sock_release(smc->clcsock);
		smc->clcsock = NULL;
	}

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	if (smc->use_fallback) {
		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
	} else if (sk->sk_state == SMC_CLOSED) {
		smc_conn_free(&smc->conn);
		schedule_delayed_work(&smc->sock_put_work,
				      SMC_CLOSE_SOCK_PUT_DELAY);
	}
	sk->sk_prot->unhash(sk);
	release_sock(sk);

	sock_put(sk);
out:
	return rc;
}
static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
{
	struct smc_sock *smc;
	struct sock *sk;

	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, &smc_proto, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = SMCPROTO_SMC;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);

	return sk;
}
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if ((addr->sin_family != AF_INET) &&
	    ((addr->sin_family != AF_UNSPEC) ||
	     (addr->sin_addr.s_addr != htonl(INADDR_ANY))))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}
#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}
#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
/* determine subnet and mask of internal TCP socket */
int smc_netinfo_by_tcpsk(struct socket *clcsock,
			 __be32 *subnet, u8 *prefix_len)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct sockaddr_in addr;
	int rc = -ENOENT;
	int len;

	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	/* get address to which the internal TCP socket is bound */
	kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len);
	/* analyze IPv4 specific data of net_device belonging to TCP socket */
	for_ifa(dst->dev->ip_ptr) {
		if (ifa->ifa_address != addr.sin_addr.s_addr)
			continue;
		*prefix_len = inet_mask_len(ifa->ifa_mask);
		*subnet = ifa->ifa_address & ifa->ifa_mask;
		rc = 0;
		break;
	} endfor_ifa(dst->dev->ip_ptr);

out_rel:
	dst_release(dst);
out:
	return rc;
}
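
/* Worked example for the subnet computation above: for a clcsock bound to
 * 192.168.1.5 on a device with netmask 255.255.255.0, the result is
 * *prefix_len == 24 and *subnet == 192.168.1.0.
 */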
static int smc_clnt_conf_first_link(struct smc_sock *smc, union ib_gid *gid)
{
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_INTERR;

	smc_wr_remember_qp_attr(link);
	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       gid, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	return rc;
}
static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	smc->conn.peer_conn_idx = clc->conn_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
	smc->conn.peer_rmbe_size = smc_uncompress_bufsize(clc->rmbe_size);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
}
static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->qpn);
	memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->psn);
	link->peer_mtu = clc->qp_mtu;
}
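
/* Handshake overview, summarized from the client code below: the CLC
 * messages (Proposal, Accept, Confirm) travel over the internal TCP socket,
 * while the LLC CONFIRM LINK exchange of smc_clnt_conf_first_link() runs
 * over the RoCE fabric and happens only when a new link group is created:
 *
 *	client                             server
 *	  | ------- CLC Proposal -------> |  \
 *	  | <------ CLC Accept ---------- |   } TCP clcsock
 *	  | ------- CLC Confirm --------> |  /
 *	  | <--- LLC CONFIRM LINK req --- |  \  RoCE fabric,
 *	  | ---- LLC CONFIRM LINK resp -> |  /  first contact only
 */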
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc)
{
	struct sockaddr_in *inaddr = (struct sockaddr_in *)smc->addr;
	struct smc_clc_msg_accept_confirm aclc;
	int local_contact = SMC_FIRST_CONTACT;
	struct smc_ib_device *smcibdev;
	struct smc_link *link;
	u8 srv_first_contact;
	int reason_code = 0;
	int rc = 0;
	u8 ibport;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc)) {
		reason_code = SMC_CLC_DECL_IPSEC;
		goto decline_rdma;
	}

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, &smcibdev, &ibport);
	if (!smcibdev) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* do inband token exchange */
	reason_code = smc_clc_send_proposal(smc, smcibdev, ibport);
	if (reason_code < 0) {
		rc = reason_code;
		goto out_err;
	}
	if (reason_code > 0) /* configuration error */
		goto decline_rdma;
	/* receive SMC Accept CLC message */
	reason_code = smc_clc_wait_msg(smc, &aclc, sizeof(aclc),
				       SMC_CLC_ACCEPT);
	if (reason_code < 0) {
		rc = reason_code;
		goto out_err;
	}
	if (reason_code > 0)
		goto decline_rdma;

	srv_first_contact = aclc.hdr.flag;
	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(smc, inaddr->sin_addr.s_addr, smcibdev,
					ibport, &aclc.lcl, srv_first_contact);
	if (local_contact < 0) {
		rc = local_contact;
		if (rc == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
		else if (rc == -ENOLINK)
			reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
		goto decline_rdma_unlock;
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, &aclc);

	rc = smc_sndbuf_create(smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma_unlock;
	}
	rc = smc_rmb_create(smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma_unlock;
	}

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &aclc);

	rc = smc_rmb_rtoken_handling(&smc->conn, &aclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma_unlock;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma_unlock;
		}
	}

	rc = smc_clc_send_confirm(smc);
	if (rc)
		goto out_err_unlock;

	if (local_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(
			smc, &smcibdev->gid[ibport - 1]);
		if (reason_code < 0) {
			rc = reason_code;
			goto out_err_unlock;
		}
		if (reason_code > 0)
			goto decline_rdma_unlock;
	}

	mutex_unlock(&smc_create_lgr_pending);
	smc_tx_init(smc);

out_connected:
	smc_copy_sock_settings_to_clc(smc);
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return rc ? rc : local_contact;

decline_rdma_unlock:
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
decline_rdma:
	/* RDMA setup failed, switch back to TCP */
	smc->use_fallback = true;
	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
		rc = smc_clc_send_decline(smc, reason_code, 0);
		if (rc < sizeof(struct smc_clc_msg_decline))
			goto out_err;
	}
	goto out_connected;

out_err_unlock:
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
out_err:
	return rc;
}
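
/* Note on the decline path above: SMC_CLC_DECL_REPLY marks a decline that
 * was itself received from the peer, so no decline message is sent back;
 * for every other reason code the peer is informed before the socket falls
 * back to TCP.
 */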
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET)
		goto out_err;
	smc->addr = addr;	/* needed for nonblocking connect */

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc)
		goto out;

	/* setup RDMA connection */
	rc = smc_connect_rdma(smc);
	if (rc < 0)
		goto out;
	else
		rc = 0; /* success cases including fallback */

out:
	release_sock(sk);
out_err:
	return rc;
}
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct sock *sk = &lsmc->sk;
	struct socket *new_clcsock;
	struct sock *new_sk;
	int rc;

	release_sock(&lsmc->sk);
	new_sk = smc_sock_alloc(sock_net(sk), NULL);
	if (!new_sk) {
		rc = -ENOMEM;
		lsmc->sk.sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(&lsmc->sk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
	lock_sock(&lsmc->sk);
	if (rc < 0) {
		lsmc->sk.sk_err = -rc;
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sk->sk_prot->unhash(new_sk);
		sock_put(new_sk);
		*new_smc = NULL;
		goto out;
	}
	if (lsmc->sk.sk_state == SMC_CLOSED) {
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sk->sk_prot->unhash(new_sk);
		sock_put(new_sk);
		*new_smc = NULL;
		goto out;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk);
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}
/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk);
}
/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			sock_put(new_sk);
			continue;
		}
		if (new_sock)
			sock_graft(new_sk, new_sock);
		return new_sk;
	}
	return NULL;
}
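
/* Reference counting of queued child socks: smc_accept_enqueue() holds a
 * reference while the sock sits on the accept queue, smc_accept_unlink()
 * drops it again, and smc_accept_dequeue() discards children that reached
 * SMC_CLOSED while queued instead of handing them to user space.
 */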
/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk);
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	if (smc->use_fallback) {
		sk->sk_state = SMC_CLOSED;
	} else {
		smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		sock_release(smc->clcsock);
		smc->clcsock = NULL;
	}
	if (smc->use_fallback) {
		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
	} else if (sk->sk_state == SMC_CLOSED) {
		smc_conn_free(&smc->conn);
		schedule_delayed_work(&smc->sock_put_work,
				      SMC_CLOSE_SOCK_PUT_DELAY);
	}
	release_sock(sk);
	sock_put(sk);
}
static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       &link->smcibdev->gid[link->ibport - 1],
				       SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
	}

	return rc;
}
/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct smc_clc_msg_accept_confirm cclc;
	int local_contact = SMC_REUSE_CONTACT;
	struct sock *newsmcsk = &new_smc->sk;
	struct smc_clc_msg_proposal pclc;
	struct smc_ib_device *smcibdev;
	struct sockaddr_in peeraddr;
	struct smc_link *link;
	int reason_code = 0;
	int rc = 0, len;
	__be32 subnet;
	u8 prefix_len;
	u8 ibport;

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	reason_code = smc_clc_wait_msg(new_smc, &pclc, sizeof(pclc),
				       SMC_CLC_PROPOSAL);
	if (reason_code < 0)
		goto out_err;
	if (reason_code > 0)
		goto decline_rdma;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		reason_code = SMC_CLC_DECL_IPSEC;
		goto decline_rdma;
	}

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(newclcsock->sk, &smcibdev, &ibport);
	if (!smcibdev) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* determine subnet and mask from internal TCP socket */
	rc = smc_netinfo_by_tcpsk(newclcsock, &subnet, &prefix_len);
	if (rc) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}
	if ((pclc.outgoing_subnet != subnet) ||
	    (pclc.prefix_len != prefix_len)) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* get address of the peer connected to the internal TCP socket */
	kernel_getpeername(newclcsock, (struct sockaddr *)&peeraddr, &len);

	/* allocate connection / link group */
	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr,
					smcibdev, ibport, &pclc.lcl, 0);
	if (local_contact == SMC_REUSE_CONTACT)
		/* lock no longer needed, free it due to following
		 * smc_clc_wait_msg() call
		 */
		mutex_unlock(&smc_create_lgr_pending);
	if (local_contact < 0) {
		rc = local_contact;
		if (rc == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
		else if (rc == -ENOLINK)
			reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
		goto decline_rdma;
	}
	link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	rc = smc_sndbuf_create(new_smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma;
	}
	rc = smc_rmb_create(new_smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma;
	}

	smc_close_init(new_smc);
	smc_rx_init(new_smc);

	rc = smc_clc_send_accept(new_smc, local_contact);
	if (rc)
		goto out_err;

	/* receive SMC Confirm CLC message */
	reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
				       SMC_CLC_CONFIRM);
	if (reason_code < 0)
		goto out_err;
	if (reason_code > 0)
		goto decline_rdma;
	smc_conn_save_peer_info(new_smc, &cclc);
	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &cclc);

	rc = smc_rmb_rtoken_handling(&new_smc->conn, &cclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code < 0) {
			/* peer is not aware of a problem */
			rc = reason_code;
			goto out_err;
		}
		if (reason_code > 0)
			goto decline_rdma;
	}

	smc_tx_init(new_smc);

out_connected:
	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;
enqueue:
	if (local_contact == SMC_FIRST_CONTACT)
		mutex_unlock(&smc_create_lgr_pending);
	lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}
	release_sock(&lsmc->sk);

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
	return;

decline_rdma:
	/* RDMA setup failed, switch back to TCP */
	smc_conn_free(&new_smc->conn);
	new_smc->use_fallback = true;
	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
		rc = smc_clc_send_decline(new_smc, reason_code, 0);
		if (rc < sizeof(struct smc_clc_msg_decline))
			goto out_err;
	}
	goto out_connected;

out_err:
	newsmcsk->sk_state = SMC_CLOSED;
	smc_conn_free(&new_smc->conn);
	goto enqueue; /* queue new sock with sk_err set */
}
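
/* smc_listen_work() above is the server-side counterpart of
 * smc_connect_rdma(): wait for the CLC Proposal, check that the proposed
 * subnet matches the one of the internal TCP socket, create or reuse a
 * link group, send the CLC Accept, wait for the CLC Confirm, and, on first
 * contact, confirm the link over the RoCE fabric before the new sock is
 * queued for accept().
 */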
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(&lsmc->sk);
	while (lsmc->sk.sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = false; /* assume rdma capability first*/
		sock_hold(&lsmc->sk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		schedule_work(&new_smc->smc_listen_work);
	}

out:
	release_sock(&lsmc->sk);
	lsmc->sk.sk_data_ready(&lsmc->sk); /* no more listening, wake accept */
}
static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	schedule_work(&smc->tcp_listen_work);

out:
	release_sock(sk);
	return rc;
}
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);

out:
	release_sock(sk);
	return rc;
}
static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int *len, int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, len, peer);
}
static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;
	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}
static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	else
		rc = smc_rx_recvmsg(smc, msg, len, flags);

out:
	release_sock(sk);
	return rc;
}
static unsigned int smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk;
	struct sock *sk;

	lock_sock(parent);
	list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)isk;

		if (sk->sk_state == SMC_ACTIVE) {
			release_sock(parent);
			return POLLIN | POLLRDNORM;
		}
	}
	release_sock(parent);

	return 0;
}
static unsigned int smc_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sock->sk);
	if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		/* if non-blocking connect finished ... */
		lock_sock(sk);
		if ((sk->sk_state == SMC_INIT) && (mask & POLLOUT)) {
			sk->sk_err = smc->clcsock->sk->sk_err;
			if (sk->sk_err) {
				mask |= POLLERR;
			} else {
				rc = smc_connect_rdma(smc);
				if (rc < 0)
					mask |= POLLERR;
				else
					/* success cases including fallback */
					mask |= POLLOUT | POLLWRNORM;
			}
		}
		release_sock(sk);
	} else {
		sock_poll_wait(file, sk_sleep(sk), wait);
		if (sk->sk_state == SMC_LISTEN)
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		if (sk->sk_err)
			mask |= POLLERR;
		if (atomic_read(&smc->conn.sndbuf_space) ||
		    (sk->sk_shutdown & SEND_SHUTDOWN)) {
			mask |= POLLOUT | POLLWRNORM;
		} else {
			sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		}
		if (atomic_read(&smc->conn.bytes_to_rcv))
			mask |= POLLIN | POLLRDNORM;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= POLLHUP;
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			mask |= POLLIN | POLLRDNORM | POLLRDHUP;
		if (sk->sk_state == SMC_APPCLOSEWAIT1)
			mask |= POLLIN;
	}

	return mask;
}
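
/* While the sock is in SMC_INIT state or running in fallback mode,
 * smc_poll() above delegates to the internal TCP socket; a writable
 * clcsock in SMC_INIT state means a non-blocking connect() has finished,
 * so the RDMA setup is triggered from poll context.
 */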
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_LISTEN) &&
	    (sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		if (sk->sk_state == SMC_LISTEN)
			rc = smc_close_active(smc);
		else
			rc = 0;
			/* nothing more to do because peer is not involved */
		break;
	}
	rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}
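
/* The "how + 1" mapping above works because of the numeric layout of the
 * constants: SHUT_RD (0) + 1 == RCV_SHUTDOWN (1), SHUT_WR (1) + 1 ==
 * SEND_SHUTDOWN (2), and SHUT_RDWR (2) + 1 == SHUTDOWN_MASK (3).
 */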
static int smc_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	return smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback)
		return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
	else
		return sock_no_ioctl(sock, cmd, arg);
}
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE)
		goto out;
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	release_sock(sk);
	return rc;
}
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_CLOSED))
		goto out;
	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		rc = -EOPNOTSUPP;
	}

out:
	release_sock(sk);
	return rc;
}
/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if ((protocol != IPPROTO_IP) && (protocol != IPPROTO_TCP))
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
			      IPPROTO_TCP, &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}
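
/* Every AF_SMC socket owns the internal AF_INET clcsock created above; it
 * carries the CLC handshake and, after a failed handshake, all payload
 * traffic, which is what makes the TCP fallback transparent to user space.
 */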
static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};
static int __init smc_init(void)
{
	int rc;

	rc = smc_pnet_init();
	if (rc)
		return rc;

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto:
	proto_unregister(&smc_proto);
out_pnet:
	smc_pnet_exit();
	return rc;
}
static void __exit smc_exit(void)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_freeing_list);

	spin_lock_bh(&smc_lgr_list.lock);
	if (!list_empty(&smc_lgr_list.list))
		list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
	spin_unlock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
		list_del_init(&lgr->list);
		smc_lgr_free(lgr); /* free link group */
	}
	smc_ib_unregister_client();
	sock_unregister(PF_SMC);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
}

module_init(smc_init);
module_exit(smc_exit);
MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);