// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */
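/*
 * Illustrative user-space sketch (not part of this file). An application
 * opts in to SMC simply by creating an AF_SMC socket; all further calls
 * keep their TCP semantics, and the stack falls back to plain TCP if the
 * peer is not SMC-capable (see the fallback paths below):
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC); // SMCPROTO_SMC6 for IPv6
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));  // CLC handshake runs underneath
 *	send(fd, buf, len, 0);                              // RDMA/ISM on success, TCP on fallback
 */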
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);

static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);

struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	smc->clcsock->file->private_data = smc->sk.sk_socket;
	smc->clcsock->file = NULL;
}

static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
			sock_put(sk); /* passive closing */
		if (sk->sk_state == SMC_LISTEN) {
			/* wake up clcsock accept */
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
		}
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}

static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	sock_hold(sk); /* sock_put below */
	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);
	flush_work(&smc->connect_work);

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
out:
	return rc;
}

static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);

	return sk;
}
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
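/*
 * Example (illustrative): a setsockopt(SO_KEEPALIVE) issued on the SMC
 * socket sets SOCK_KEEPOPEN in sk_flags; smc_copy_sock_settings_to_clc()
 * mirrors it onto the internal TCP clcsock via SK_FLAGS_SMC_TO_CLC, so the
 * keepalive machinery of the underlying TCP connection stays in sync.
 */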
/* register a new rmb, send confirm_rkey msg to register with peer */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
		       bool conf_rkey)
{
	if (!rmb_desc->wr_reg) {
		/* register memory region for new rmb */
		if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
			rmb_desc->regerr = 1;
			return -EFAULT;
		}
		rmb_desc->wr_reg = 1;
	}
	if (!conf_rkey)
		return 0;
	/* exchange confirm_rkey msg with peer */
	if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	return 0;
}

static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}

	if (link->llc_confirm_rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive ADD LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
	}

	/* send add link reject message, only one link supported for now */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}
static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->rmbe_size);

	smc->conn.peer_rmbe_idx = clc->rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->dmbe_size);

	smc->conn.peer_rmbe_idx = clc->dmbe_idx;
	smc->conn.peer_token = clc->token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->qpn);
	memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->psn);
	link->peer_mtu = clc->qp_mtu;
}
static void smc_switch_to_fallback(struct smc_sock *smc)
{
	smc->use_fallback = true;
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
		smc->clcsock->wq.fasync_list =
			smc->sk.sk_socket->wq.fasync_list;
	}
}

/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	smc_switch_to_fallback(smc);
	smc->fallback_rsn = reason_code;
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
{
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code);
		if (rc < 0) {
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}

/* abort connecting */
static int smc_connect_abort(struct smc_sock *smc, int reason_code,
			     int local_contact)
{
	bool is_smcd = smc->conn.lgr->is_smcd;

	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_cleanup_early(&smc->conn);
	else
		smc_conn_free(&smc->conn);
	if (is_smcd)
		/* there is only one lgr role for SMC-D; use server lock */
		mutex_unlock(&smc_server_lgr_pending);
	else
		mutex_unlock(&smc_client_lgr_pending);

	smc->connect_nonblock = 0;
	return reason_code;
}
/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}

/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev)
		return SMC_CLC_DECL_NOSMCDDEV;
	return 0;
}

/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev, ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}

/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
					struct smc_init_info *ini)
{
	if (!is_smcd)
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev, ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc, int smc_type,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, smc_type, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT,
				CLC_WAIT_TIME);
}
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	struct smc_link *link;
	int reason_code = 0;

	ini->is_smcd = false;
	ini->ib_lcl = &aclc->lcl;
	ini->ib_clcqpn = ntoh24(aclc->qpn);
	ini->srv_first_contact = aclc->hdr.flag;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, aclc);

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
					 ini->cln_first_contact);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, aclc))
		return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
					 ini->cln_first_contact);

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
						 ini->cln_first_contact);
	} else {
		if (smc_reg_rmb(link, smc->conn.rmb_desc, true))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
						 ini->cln_first_contact);
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc);
	if (reason_code)
		return smc_connect_abort(smc, reason_code,
					 ini->cln_first_contact);

	smc_tx_init(smc);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(smc);
		if (reason_code)
			return smc_connect_abort(smc, reason_code,
						 ini->cln_first_contact);
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}

/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	ini->is_smcd = true;
	ini->ism_gid = aclc->gid;
	ini->srv_first_contact = aclc->hdr.flag;

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	if (smc_buf_create(smc, true))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
					 ini->cln_first_contact);

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	rc = smc_clc_send_confirm(smc);
	if (rc)
		return smc_connect_abort(smc, rc, ini->cln_first_contact);
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	bool ism_supported = false, rdma_supported = false;
	struct smc_clc_msg_accept_confirm aclc;
	struct smc_init_info ini = {0};
	int smc_type;
	int rc = 0;

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, &ini))
		return smc_connect_decline_fallback(smc,
						    SMC_CLC_DECL_GETVLANERR);

	/* check if there is an ism device available */
	if (!smc_find_ism_device(smc, &ini) &&
	    !smc_connect_ism_vlan_setup(smc, &ini)) {
		/* ISM is supported for this connection */
		ism_supported = true;
		smc_type = SMC_TYPE_D;
	}

	/* check if there is a rdma device available */
	if (!smc_find_rdma_device(smc, &ini)) {
		/* RDMA is supported for this connection */
		rdma_supported = true;
		if (ism_supported)
			smc_type = SMC_TYPE_B; /* both */
		else
			smc_type = SMC_TYPE_R; /* only RDMA */
	}

	/* if neither ISM nor RDMA are supported, fallback */
	if (!rdma_supported && !ism_supported)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, smc_type, &aclc, &ini);
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
		return smc_connect_decline_fallback(smc, rc);
	}

	/* depending on previous steps, connect using rdma or ism */
	if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
		rc = smc_connect_rdma(smc, &aclc, &ini);
	else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
		rc = smc_connect_ism(smc, &aclc, &ini);
	else
		rc = SMC_CLC_DECL_MODEUNSUPP;
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
		return smc_connect_decline_fallback(smc, rc);
	}

	smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
	return 0;
}
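/*
 * Orientation only (derived from the calls above, not a normative spec):
 * the client-side CLC handshake driven by __smc_connect() exchanges three
 * messages on the internal TCP connection before RDMA/ISM takes over:
 *
 *	client                            server
 *	  |---- SMC CLC Proposal --------->|   smc_connect_clc()
 *	  |<--- SMC CLC Accept ------------|   smc_clc_wait_msg(SMC_CLC_ACCEPT)
 *	  |---- SMC CLC Confirm ---------->|   smc_clc_send_confirm()
 *
 * Any nonzero reason code on this path ends in
 * smc_connect_decline_fallback(), i.e. plain TCP.
 */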
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
		     (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		sock_put(&smc->sk); /* passive closing */
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;

	sock_hold(&smc->sk); /* sock put in passive closing */
	if (smc->use_fallback)
		goto out;
	if (flags & O_NONBLOCK) {
		if (schedule_work(&smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink () */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}

/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk); /* sock_put below */
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
}
static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}

	if (link->llc_confirm_resp_rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* send ADD LINK request to client over the RoCE fabric */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	/* receive ADD LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
	}

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}
/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}

/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;

	smc_listen_out(new_smc);
}
/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_contact)
{
	/* RDMA setup failed, switch back to TCP */
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_cleanup_early(&new_smc->conn);
	else
		smc_conn_free(&new_smc->conn);
	if (reason_code < 0) { /* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	smc_switch_to_fallback(new_smc);
	new_smc->fallback_rsn = reason_code;
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}

/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}
/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}

/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd;
	int rc;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	ini->ism_gid = pclc_smcd->gid;
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Check if peer can be reached via ISM device */
	if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
			    new_smc->conn.lgr->vlan_id,
			    new_smc->conn.lgr->smcd)) {
		if (ini->cln_first_contact == SMC_FIRST_CONTACT)
			smc_lgr_cleanup_early(&new_smc->conn);
		else
			smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_SMCDNOTALK;
	}

	/* Create send and receive buffers */
	if (smc_buf_create(new_smc, true)) {
		if (ini->cln_first_contact == SMC_FIRST_CONTACT)
			smc_lgr_cleanup_early(&new_smc->conn);
		else
			smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_MEM;
	}

	return 0;
}
/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	if (local_contact != SMC_FIRST_CONTACT) {
		if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}

/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
	int reason_code = 0;

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto decline;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto decline;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code)
			goto decline;
	}
	return 0;

decline:
	smc_listen_decline(new_smc, reason_code, local_contact);
	return reason_code;
}
/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm cclc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info ini = {0};
	bool ism_supported = false;
	u8 buf[SMC_CLC_MAX_LEN];
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		smc_switch_to_fallback(new_smc);
		new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
		smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	pclc = (struct smc_clc_msg_proposal *)&buf;
	rc = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	/* check for matching IP prefix and subnet length */
	rc = smc_listen_prfx_check(new_smc, pclc);
	if (rc)
		goto out_decl;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, &ini)) {
		rc = SMC_CLC_DECL_GETVLANERR;
		goto out_decl;
	}

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* check if ISM is available */
	if (pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) {
		ini.is_smcd = true; /* prepare ISM check */
		rc = smc_find_ism_device(new_smc, &ini);
		if (!rc)
			rc = smc_listen_ism_init(new_smc, pclc, &ini);
		if (!rc)
			ism_supported = true;
		else if (pclc->hdr.path == SMC_TYPE_D)
			goto out_unlock; /* skip RDMA and decline */
	}

	/* check if RDMA is available */
	if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
		/* prepare RDMA check */
		ini.is_smcd = false;
		ini.ism_dev = NULL;
		ini.ib_lcl = &pclc->lcl;
		rc = smc_find_rdma_device(new_smc, &ini);
		if (rc) {
			/* no RDMA device found */
			if (pclc->hdr.path == SMC_TYPE_B)
				/* neither ISM nor RDMA device found */
				rc = SMC_CLC_DECL_NOSMCDEV;
			goto out_unlock;
		}
		rc = smc_listen_rdma_init(new_smc, &ini);
		if (rc)
			goto out_unlock;
		rc = smc_listen_rdma_reg(new_smc, ini.cln_first_contact);
		if (rc)
			goto out_unlock;
	}

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, ini.cln_first_contact);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ism_supported)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	rc = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ism_supported)
			goto out_unlock;
		goto out_decl;
	}

	/* finish worker */
	if (!ism_supported) {
		rc = smc_listen_rdma_finish(new_smc, &cclc,
					    ini.cln_first_contact);
		mutex_unlock(&smc_server_lgr_pending);
		if (rc)
			return;
	}
	smc_conn_save_peer_info(new_smc, &cclc);
	smc_listen_out_connected(new_smc);
	return;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini.cln_first_contact);
}
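/*
 * Rough server-side mirror of the client flow (informational summary of
 * smc_listen_work() above): receive the CLC Proposal, probe ISM then RDMA,
 * send the CLC Accept, wait for the CLC Confirm, and either enqueue the
 * child socket for accept() or decline and fall back to TCP.
 */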
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!schedule_work(&new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}
static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
	    smc->connect_nonblock)
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;
	sock_hold(sk); /* sock_hold in tcp_listen_worker */
	if (!schedule_work(&smc->tcp_listen_work))
		sock_put(sk);

out:
	release_sock(sk);
	return rc;
}
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
					 MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}
static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}
static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}
static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work()*/
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}
static int smc_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
					   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk->sk_error_report(sk);
	}

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);
	if (rc || smc->use_fallback)
		goto out;
	switch (optname) {
	case TCP_ULP:
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (val)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (!val)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
out:
	release_sock(sk);

	return rc;
}

static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}

out:
	release_sock(sk);

	return rc;
}
/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}

static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};
static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = smc_pnet_init();
	if (rc)
		goto out_pernet_subsys;

	rc = smc_core_init();
	if (rc) {
		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_core:
	smc_core_exit();
out_pnet:
	smc_pnet_exit();
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}

static void __exit smc_exit(void)
{
	static_branch_disable(&tcp_have_smc);
	sock_unregister(PF_SMC);
	smc_core_exit();
	smc_ib_unregister_client();
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	unregister_pernet_subsys(&smc_net_ops);
	rcu_barrier();
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);