/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */
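/*
 * Illustrative userspace usage (a sketch, not part of this file): an
 * application opts in to SMC simply by creating an AF_SMC socket where
 * it would otherwise create an AF_INET TCP socket, e.g.
 *
 *	fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);	 // IPv4 addressing
 *	fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC6); // IPv6 addressing
 *
 * bind/connect/listen/accept then keep the usual sockaddr_in(6) address
 * format; see smc_create() below for the corresponding checks.
 */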
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

static DEFINE_MUTEX(smc_create_lgr_pending);	/* serialize link group
						 * creation
						 */

static void smc_tcp_listen_work(struct work_struct *work);
static void smc_connect_work(struct work_struct *work);
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};

int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_info && sk->sk_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);
	flush_work(&smc->connect_work);
	kfree(smc->connect_info);
	smc->connect_info = NULL;

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		sock_release(smc->clcsock);
		smc->clcsock = NULL;
	}
	if (smc->use_fallback) {
		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
			sock_put(sk); /* passive closing */
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
	}

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
		smc_conn_free(&smc->conn);
	release_sock(sk);

	sk->sk_prot->unhash(sk);
	sock_put(sk); /* final sock_put */
out:
	return rc;
}
static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}

static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);

	return sk;
}
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
/* register a new rmb, optionally send confirm_rkey msg to register with peer */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
		       bool conf_rkey)
{
	/* register memory region for new rmb */
	if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	if (!conf_rkey)
		return 0;
	/* exchange confirm_rkey msg with peer */
	if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	return 0;
}
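/* In this file, conf_rkey is false while the first link is being
 * confirmed (smc_clnt_conf_first_link()/smc_serv_conf_first_link()) and
 * true when an rmb that is not being reused is registered for a further
 * connection on an existing link group (smc_connect_rdma(),
 * smc_listen_rdma_reg()).
 */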
static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	if (link->llc_confirm_rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive ADD LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	/* send add link reject message, only one link supported for now */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}
static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->rmbe_size);

	smc->conn.peer_rmbe_idx = clc->rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->dmbe_size);

	smc->conn.peer_rmbe_idx = clc->dmbe_idx;
	smc->conn.peer_token = clc->token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->qpn);
	memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->psn);
	link->peer_mtu = clc->qp_mtu;
}
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	smc->use_fallback = true;
	smc->fallback_rsn = reason_code;
	smc_copy_sock_settings_to_clc(smc);
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}
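/* Note: once use_fallback is set, all data flows over the internal TCP
 * clcsock and the SMC-specific paths are bypassed; fallback_rsn keeps
 * the decline reason for diagnostics. The smc sock still moves to
 * SMC_ACTIVE so user space sees an ordinarily connected socket.
 */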
/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
{
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code);
		if (rc < 0) {
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}

/* abort connecting */
static int smc_connect_abort(struct smc_sock *smc, int reason_code,
			     int local_contact)
{
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
	return reason_code;
}
/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev,
			  u8 *ibport, unsigned short vlan_id, u8 gid[])
{
	int reason_code = 0;

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ibdev, ibport, vlan_id,
				    gid);
	if (!(*ibdev))
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */

	return reason_code;
}

/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_check_ism(struct smc_sock *smc, struct smcd_dev **ismdev)
{
	/* Find ISM device with same PNETID as connecting interface  */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ismdev);
	if (!(*ismdev))
		return SMC_CLC_DECL_CNFERR; /* configuration error */
	return 0;
}

/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smcd_dev *ismdev,
				      unsigned short vlan_id)
{
	if (vlan_id && smc_ism_get_vlan(ismdev, vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}

/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
					struct smcd_dev *ismdev,
					unsigned short vlan_id)
{
	if (!is_smcd)
		return 0;
	if (vlan_id && smc_ism_put_vlan(ismdev, vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc, int smc_type,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_ib_device *ibdev, u8 ibport,
			   u8 gid[], struct smcd_dev *ismdev)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, gid, ismdev);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT);
}
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_ib_device *ibdev, u8 ibport)
{
	int local_contact = SMC_FIRST_CONTACT;
	struct smc_link *link;
	int reason_code = 0;

	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
					ibport, ntoh24(aclc->qpn), &aclc->lcl,
					NULL, 0);
	if (local_contact < 0) {
		if (local_contact == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
		else if (local_contact == -ENOLINK)
			reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
		else
			reason_code = SMC_CLC_DECL_INTERR; /* other error */
		return smc_connect_abort(smc, reason_code, 0);
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, aclc);

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, aclc))
		return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
					 local_contact);

	smc_close_init(smc);
	smc_rx_init(smc);

	if (local_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
						 local_contact);
	} else {
		if (!smc->conn.rmb_desc->reused &&
		    smc_reg_rmb(link, smc->conn.rmb_desc, true))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
						 local_contact);
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc);
	if (reason_code)
		return smc_connect_abort(smc, reason_code, local_contact);

	smc_tx_init(smc);

	if (local_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(smc);
		if (reason_code)
			return smc_connect_abort(smc, reason_code,
						 local_contact);
	}
	mutex_unlock(&smc_create_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}
/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smcd_dev *ismdev)
{
	int local_contact = SMC_FIRST_CONTACT;
	int rc = 0;

	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
					NULL, ismdev, aclc->gid);
	if (local_contact < 0)
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);

	/* Create send and receive buffers */
	if (smc_buf_create(smc, true))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	rc = smc_clc_send_confirm(smc);
	if (rc)
		return smc_connect_abort(smc, rc, local_contact);
	mutex_unlock(&smc_create_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}
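/* __smc_connect() below works as a decision ladder:
 * 1) explicit fallback or a peer that did not signal SMC capability
 *    degrades to plain TCP right away;
 * 2) the VLAN is determined and ISM (SMC-D) and RoCE (SMC-R) devices
 *    are probed, yielding the proposed type (D, R, or B for both);
 * 3) after the CLC handshake the mode the server accepted is used, or
 *    the connection declines and falls back to TCP.
 */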
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	bool ism_supported = false, rdma_supported = false;
	struct smc_clc_msg_accept_confirm aclc;
	struct smc_ib_device *ibdev;
	struct smcd_dev *ismdev;
	u8 gid[SMC_GID_SIZE];
	unsigned short vlan;
	int smc_type;
	int rc = 0;
	u8 ibport;

	sock_hold(&smc->sk); /* sock put in passive closing */

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);

	/* check for VLAN ID */
	if (smc_vlan_by_tcpsk(smc->clcsock, &vlan))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR);

	/* check if there is an ism device available */
	if (!smc_check_ism(smc, &ismdev) &&
	    !smc_connect_ism_vlan_setup(smc, ismdev, vlan)) {
		/* ISM is supported for this connection */
		ism_supported = true;
		smc_type = SMC_TYPE_D;
	}

	/* check if there is a rdma device available */
	if (!smc_check_rdma(smc, &ibdev, &ibport, vlan, gid)) {
		/* RDMA is supported for this connection */
		rdma_supported = true;
		if (ism_supported)
			smc_type = SMC_TYPE_B; /* both */
		else
			smc_type = SMC_TYPE_R; /* only RDMA */
	}

	/* if neither ISM nor RDMA are supported, fallback */
	if (!rdma_supported && !ism_supported)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, gid, ismdev);
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
		return smc_connect_decline_fallback(smc, rc);
	}

	/* depending on previous steps, connect using rdma or ism */
	if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
		rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
	else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
		rc = smc_connect_ism(smc, &aclc, ismdev);
	else
		rc = SMC_CLC_DECL_MODEUNSUPP;
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
		return smc_connect_decline_fallback(smc, rc);
	}

	smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
	return 0;
}
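/* worker for a non-blocking connect: performs the potentially blocking
 * kernel_connect() on the clcsock plus the SMC handshake outside the
 * caller's context, and signals completion through sk_state_change() or
 * sk_write_space(); scheduled from the O_NONBLOCK branch of
 * smc_connect() below.
 */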
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	int rc;

	lock_sock(&smc->sk);
	rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
			    smc->connect_info->alen, smc->connect_info->flags);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
		goto out;
	}
	if (rc < 0) {
		smc->sk.sk_err = -rc;
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (smc->sk.sk_err)
		smc->sk.sk_state_change(&smc->sk);
	else
		smc->sk.sk_write_space(&smc->sk);
	kfree(smc->connect_info);
	smc->connect_info = NULL;
	release_sock(&smc->sk);
}

static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (flags & O_NONBLOCK) {
		if (smc->connect_info) {
			rc = -EALREADY;
			goto out;
		}
		smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
		if (!smc->connect_info) {
			rc = -ENOMEM;
			goto out;
		}
		smc->connect_info->alen = alen;
		smc->connect_info->flags = flags ^ O_NONBLOCK;
		memcpy(&smc->connect_info->addr, addr, alen);
		schedule_work(&smc->connect_work);
		rc = -EINPROGRESS;
	} else {
		rc = kernel_connect(smc->clcsock, addr, alen, flags);
		if (rc)
			goto out;

		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
	lock_sock(lsk);
	if (rc < 0)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		new_sk->sk_prot->unhash(new_sk);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}

/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink () */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}

/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			new_sk->sk_prot->unhash(new_sk);
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock)
			sock_graft(new_sk, new_sock);
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	if (!smc->use_fallback) {
		smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		struct socket *tcp;

		tcp = smc->clcsock;
		smc->clcsock = NULL;
		sock_release(tcp);
	}
	if (smc->use_fallback) {
		sock_put(sk); /* passive closing */
		sk->sk_state = SMC_CLOSED;
	} else {
		if (sk->sk_state == SMC_CLOSED)
			smc_conn_free(&smc->conn);
	}
	release_sock(sk);
	sk->sk_prot->unhash(sk);
	sock_put(sk); /* final sock_put */
}
static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	if (link->llc_confirm_resp_rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* send ADD LINK request to client over the RoCE fabric */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	/* receive ADD LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}

/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}
	release_sock(&lsmc->sk);

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}

/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;
	smc_conn_free(&new_smc->conn);

	smc_listen_out(new_smc);
}
/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_contact)
{
	/* RDMA setup failed, switch back to TCP */
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(new_smc->conn.lgr);
	if (reason_code < 0) { /* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	smc_conn_free(&new_smc->conn);
	new_smc->use_fallback = true;
	new_smc->fallback_rsn = reason_code;
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}

/* listen worker: check prefixes */
static int smc_listen_rdma_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_CNFERR;

	return 0;
}

/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_clc_msg_proposal *pclc,
				struct smc_ib_device *ibdev, u8 ibport,
				int *local_contact)
{
	/* allocate connection / link group */
	*local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
					 &pclc->lcl, NULL, 0);
	if (*local_contact < 0) {
		if (*local_contact == -ENOMEM)
			return SMC_CLC_DECL_MEM;/* insufficient memory*/
		return SMC_CLC_DECL_INTERR; /* other error */
	}

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}

/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smcd_dev *ismdev,
			       int *local_contact)
{
	struct smc_clc_msg_smcd *pclc_smcd;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	*local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
					 ismdev, pclc_smcd->gid);
	if (*local_contact < 0) {
		if (*local_contact == -ENOMEM)
			return SMC_CLC_DECL_MEM;/* insufficient memory*/
		return SMC_CLC_DECL_INTERR; /* other error */
	}

	/* Check if peer can be reached via ISM device */
	if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
			    new_smc->conn.lgr->vlan_id,
			    new_smc->conn.lgr->smcd)) {
		if (*local_contact == SMC_FIRST_CONTACT)
			smc_lgr_forget(new_smc->conn.lgr);
		smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_CNFERR;
	}

	/* Create send and receive buffers */
	if (smc_buf_create(new_smc, true)) {
		if (*local_contact == SMC_FIRST_CONTACT)
			smc_lgr_forget(new_smc->conn.lgr);
		smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_MEM;
	}

	return 0;
}

/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	if (local_contact != SMC_FIRST_CONTACT) {
		if (!new_smc->conn.rmb_desc->reused) {
			if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
				return SMC_CLC_DECL_ERR_REGRMB;
		}
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}

/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
	int reason_code = 0;

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto decline;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto decline;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code)
			goto decline;
	}
	return 0;

decline:
	mutex_unlock(&smc_create_lgr_pending);
	smc_listen_decline(new_smc, reason_code, local_contact);
	return reason_code;
}
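/* smc_listen_work() handles one accepted connection end to end: receive
 * the CLC proposal, prefer ISM over RDMA when both are offered, send the
 * CLC accept, wait for the CLC confirm, and complete the link setup.
 * smc_create_lgr_pending serializes link group creation across workers.
 */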
/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm cclc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_ib_device *ibdev;
	bool ism_supported = false;
	struct smcd_dev *ismdev;
	u8 buf[SMC_CLC_MAX_LEN];
	int local_contact = 0;
	unsigned short vlan;
	int reason_code = 0;
	int rc = 0;
	u8 ibport;

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		new_smc->use_fallback = true;
		new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
		smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	pclc = (struct smc_clc_msg_proposal *)&buf;
	reason_code = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
				       SMC_CLC_PROPOSAL);
	if (reason_code) {
		smc_listen_decline(new_smc, reason_code, 0);
		return;
	}

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		smc_listen_decline(new_smc, SMC_CLC_DECL_IPSEC, 0);
		return;
	}

	mutex_lock(&smc_create_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* check if ISM is available */
	if ((pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) &&
	    !smc_check_ism(new_smc, &ismdev) &&
	    !smc_listen_ism_init(new_smc, pclc, ismdev, &local_contact)) {
		ism_supported = true;
	}

	/* check if RDMA is available */
	if (!ism_supported &&
	    ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) ||
	     smc_vlan_by_tcpsk(new_smc->clcsock, &vlan) ||
	     smc_check_rdma(new_smc, &ibdev, &ibport, vlan, NULL) ||
	     smc_listen_rdma_check(new_smc, pclc) ||
	     smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
				  &local_contact) ||
	     smc_listen_rdma_reg(new_smc, local_contact))) {
		/* SMC not supported, decline */
		mutex_unlock(&smc_create_lgr_pending);
		smc_listen_decline(new_smc, SMC_CLC_DECL_MODEUNSUPP,
				   local_contact);
		return;
	}

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, local_contact);
	if (rc) {
		mutex_unlock(&smc_create_lgr_pending);
		smc_listen_decline(new_smc, rc, local_contact);
		return;
	}

	/* receive SMC Confirm CLC message */
	reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
				       SMC_CLC_CONFIRM);
	if (reason_code) {
		mutex_unlock(&smc_create_lgr_pending);
		smc_listen_decline(new_smc, reason_code, local_contact);
		return;
	}

	/* finish worker */
	if (!ism_supported) {
		if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
			return;
	}
	smc_conn_save_peer_info(new_smc, &cclc);
	mutex_unlock(&smc_create_lgr_pending);
	smc_listen_out_connected(new_smc);
}
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!schedule_work(&new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}

static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	sock_hold(sk); /* sock_hold in tcp_listen_worker */
	if (!schedule_work(&smc->tcp_listen_work))
		sock_put(sk);

out:
	release_sock(sk);
	return rc;
}
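/* Note: smc_accept() below also emulates TCP_DEFER_ACCEPT (recorded in
 * sockopt_defer_accept by smc_setsockopt()): after dequeuing a child
 * socket it can wait until data has actually arrived before returning
 * the child to user space.
 */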
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
								MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}

static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT) {
			smc->use_fallback = true;
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}

static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}
static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;
	struct smc_sock *smc;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
		if (sk->sk_err)
			mask |= EPOLLERR;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask = smc_accept_poll(sk);
		} else {
			if (atomic_read(&smc->conn.sndbuf_space) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}

static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}
static int smc_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
					   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk->sk_error_report(sk);
	}
	if (rc)
		return rc;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);
	switch (optname) {
	case TCP_ULP:
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT) {
			smc->use_fallback = true;
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			if (!smc->use_fallback)
				rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
			if (val && !smc->use_fallback)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
			if (!val && !smc->use_fallback)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
	release_sock(sk);

	return rc;
}

static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}

static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);

	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}
out:
	release_sock(sk);

	return rc;
}
/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};

static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}

static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};
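/* Registration order in smc_init() matters: the pnet table plus the LLC
 * and CDC handlers come first, then the v4/v6 protos, the AF_SMC socket
 * family and the IB client; the tcp_have_smc static branch is enabled
 * last so TCP starts advertising SMC only once everything is in place,
 * and the error unwinding (like smc_exit()) releases in reverse order.
 */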
static int __init smc_init(void)
{
	int rc;

	rc = smc_pnet_init();
	if (rc)
		return rc;

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_pnet:
	smc_pnet_exit();
	return rc;
}
static void __exit smc_exit(void)
{
	smc_core_exit();
	static_branch_disable(&tcp_have_smc);
	smc_ib_unregister_client();
	sock_unregister(PF_SMC);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
}
module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);