// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
#include <linux/ctype.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
struct workqueue_struct	*smc_close_wq;	/* wq for close work */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);
void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
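/* Note: two struct proto instances exist only because the SMC-v4 and SMC-v6
 * sockets hash into separate tables. SLAB_TYPESAFE_BY_RCU means a freed
 * smc_sock may be reused for a new socket without an intervening RCU grace
 * period, so any lockless reader must revalidate a socket after looking it up.
 */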
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
		smc->clcsock->file->private_data = smc->sk.sk_socket;
		smc->clcsock->file = NULL;
	}
}
static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
			sock_put(sk); /* passive closing */
		if (sk->sk_state == SMC_LISTEN) {
			/* wake up clcsock accept */
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
		}
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	sock_hold(sk); /* sock_put below */
	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);
	flush_work(&smc->connect_work);

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
out:
	return rc;
}
static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);

	return sk;
}
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}
#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
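/* The mirroring is deliberately asymmetric: almost every SOL_SOCKET flag
 * flows from the SMC socket down to the internal CLC (TCP) socket, because
 * generic setsockopt() handling in net/core never reaches the SMC layer,
 * while only the few flags that affect SMC behaviour flow back up for
 * accepted sockets.
 */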
/* register the new rmb on all links */
static int smcr_lgr_reg_rmbs(struct smc_link *link,
			     struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = link->lgr;
	int i, rc = 0;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;
	/* protect against parallel smc_llc_cli_rkey_exchange() and
	 * parallel smcr_link_reg_rmb()
	 */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
		if (rc)
			goto out;
	}

	/* exchange confirm_rkey msg with peer */
	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
	if (rc) {
		rc = SMC_CLC_DECL_ERR_RTOK;
		goto out;
	}

	rmb_desc->is_conf_rkey = true;
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}
static int smcr_clnt_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	/* receive CONFIRM LINK request from server over RoCE fabric */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* optional 2nd link, receive ADD LINK request from server */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_ADD_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		if (rc == -EAGAIN)
			rc = 0; /* no DECLINE received, go with one link */
		return rc;
	}
	smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
	smc_llc_cli_add_link(link, qentry);
	return 0;
}
static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);

	smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}
static bool smc_isascii(char *hostname)
{
	int i;

	for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
		if (!isascii(hostname[i]))
			return false;
	return true;
}
static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);

	smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
	smc->conn.peer_token = clc->d0.token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
	if (clc->hdr.version > SMC_V1 &&
	    (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) {
		struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)clc;
		struct smc_clc_first_contact_ext *fce =
			(struct smc_clc_first_contact_ext *)
				(((u8 *)clc_v2) + sizeof(*clc_v2));

		memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
		       SMC_MAX_EID_LEN);
		smc->conn.lgr->peer_os = fce->os_type;
		smc->conn.lgr->peer_smc_release = fce->release;
		if (smc_isascii(fce->hostname))
			memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
			       SMC_MAX_HOSTNAME_LEN);
	}
}
static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
}
static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->r0.qpn);
	memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->r0.psn);
	link->peer_mtu = clc->r0.qp_mtu;
}
static void smc_switch_to_fallback(struct smc_sock *smc)
{
	smc->use_fallback = true;
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
		smc->clcsock->wq.fasync_list =
			smc->sk.sk_socket->wq.fasync_list;
	}
}
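/* After this point the socket is, in effect, plain TCP: the VFS file's
 * private_data is repointed from the SMC socket to the internal clcsock,
 * so subsequent I/O resolves to the TCP socket. Only the fasync list must
 * be carried over by hand.
 */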
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	smc_switch_to_fallback(smc);
	smc->fallback_rsn = reason_code;
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}
/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
					u8 version)
{
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code, version);
		if (rc < 0) {
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}
static void smc_conn_abort(struct smc_sock *smc, int local_first)
{
	if (local_first)
		smc_lgr_cleanup_early(&smc->conn);
	else
		smc_conn_free(&smc->conn);
}
/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}
/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev[0])
		return SMC_CLC_DECL_NOSMCDDEV;
	else
		ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
	return 0;
}
/* is chid unique for the ism devices that are already determined? */
static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
					   int cnt)
{
	int i = (!ini->ism_dev[0]) ? 1 : 0;

	for (; i < cnt; i++)
		if (ini->ism_chid[i] == chid)
			return false;
	return true;
}
/* determine possible V2 ISM devices (either without PNETID or with PNETID plus
 * PNETID matching net_device)
 */
static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
				       struct smc_init_info *ini)
{
	int rc = SMC_CLC_DECL_NOSMCDDEV;
	struct smcd_dev *smcd;
	int i = 1;
	u16 chid;

	if (smcd_indicated(ini->smc_type_v1))
		rc = 0;		/* already initialized for V1 */
	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away || smcd == ini->ism_dev[0])
			continue;
		chid = smc_ism_get_chid(smcd);
		if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
			continue;
		if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
		    smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
			ini->ism_dev[i] = smcd;
			ini->ism_chid[i] = chid;
			rc = 0;
			i++;
			if (i > SMC_MAX_ISM_DEVS)
				break;
		}
	}
	mutex_unlock(&smcd_dev_list.mutex);
	ini->ism_offered_cnt = i - 1;
	if (!ini->ism_dev[0] && !ini->ism_dev[1])
		ini->smcd_version = 0;

	return rc;
}
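/* Layout of ini->ism_dev[]: slot 0 is reserved for the ISM device found via
 * V1 PNETID matching (if any); slots 1..SMC_MAX_ISM_DEVS hold the V2
 * candidates collected above. That is why the walk starts at i = 1 and why
 * ism_offered_cnt ends up as i - 1.
 */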
/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}
static int smc_find_proposal_devices(struct smc_sock *smc,
				     struct smc_init_info *ini)
{
	int rc = 0;

	/* check if there is an ism device available */
	if (ini->smcd_version & SMC_V1) {
		if (smc_find_ism_device(smc, ini) ||
		    smc_connect_ism_vlan_setup(smc, ini)) {
			if (ini->smc_type_v1 == SMC_TYPE_B)
				ini->smc_type_v1 = SMC_TYPE_R;
			else
				ini->smc_type_v1 = SMC_TYPE_N;
		} /* else ISM V1 is supported for this connection */
		if (smc_find_rdma_device(smc, ini)) {
			if (ini->smc_type_v1 == SMC_TYPE_B)
				ini->smc_type_v1 = SMC_TYPE_D;
			else
				ini->smc_type_v1 = SMC_TYPE_N;
		} /* else RDMA is supported for this connection */
	}
	if (smc_ism_is_v2_capable() && smc_find_ism_v2_device_clnt(smc, ini))
		ini->smc_type_v2 = SMC_TYPE_N;

	/* if neither ISM nor RDMA are supported, fallback */
	if (!smcr_indicated(ini->smc_type_v1) &&
	    ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
		rc = SMC_CLC_DECL_NOSMCDEV;

	return rc;
}
/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
					struct smc_init_info *ini)
{
	if (!smcd_indicated(ini->smc_type_v1))
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}
#define SMC_CLC_MAX_ACCEPT_LEN \
	(sizeof(struct smc_clc_msg_accept_confirm_v2) + \
	 sizeof(struct smc_clc_first_contact_ext) + \
	 sizeof(struct smc_clc_msg_trail))

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm_v2 *aclc2,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
				SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}
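/* Client-side CLC message flow, as driven by the callers below:
 *
 *	client				server
 *	  |------ SMC CLC Proposal ----->|
 *	  |<----- SMC CLC Accept --------|
 *	  |------ SMC CLC Confirm ------>|	(or Decline to fall back)
 *
 * followed, for SMC-R first contact, by the LLC CONFIRM LINK exchange over
 * the RDMA link itself.
 */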
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	int i, reason_code = 0;
	struct smc_link *link;

	ini->is_smcd = false;
	ini->ib_lcl = &aclc->r0.lcl;
	ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}

	smc_conn_save_peer_info(smc, aclc);

	if (ini->first_contact_local) {
		link = smc->conn.lnk;
	} else {
		/* set link that was assigned by server */
		link = NULL;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *l = &smc->conn.lgr->lnk[i];

			if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
			    !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
				    SMC_GID_SIZE) &&
			    !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
				    sizeof(l->peer_mac))) {
				link = l;
				break;
			}
		}
		if (!link) {
			reason_code = SMC_CLC_DECL_NOSRVLINK;
			goto connect_abort;
		}
		smc->conn.lnk = link;
	}

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false)) {
		reason_code = SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	if (ini->first_contact_local)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto connect_abort;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->first_contact_local) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto connect_abort;
		}
	} else {
		if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
			reason_code = SMC_CLC_DECL_ERR_REGRMB;
			goto connect_abort;
		}
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
					   SMC_V1);
	if (reason_code)
		goto connect_abort;

	smc_tx_init(smc);

	if (ini->first_contact_local) {
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_clnt_conf_first_link(smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
		if (reason_code)
			goto connect_abort;
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_conn_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_client_lgr_pending);
	smc->connect_nonblock = 0;

	return reason_code;
}
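/* On anything but first contact the client reuses an existing link group, so
 * no new queue pairs are brought up; the loop above merely locates the
 * server-chosen link by QP number, GID and MAC.
 */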
/* The server has chosen one of the proposed ISM devices for the communication.
 * Determine from the CHID of the received CLC ACCEPT the ISM device chosen.
 */
static int
smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
			       struct smc_init_info *ini)
{
	int i;

	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
		if (ini->ism_chid[i] == ntohs(aclc->chid)) {
			ini->ism_selected = i;
			return 0;
		}
	}

	return -EPROTO;
}
/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	if (aclc->hdr.version == SMC_V2) {
		struct smc_clc_msg_accept_confirm_v2 *aclc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)aclc;

		rc = smc_v2_determine_accepted_chid(aclc_v2, ini);
		if (rc)
			return rc;
	}
	ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	rc = smc_buf_create(smc, true);
	if (rc) {
		rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	rc = smc_clc_send_confirm(smc, ini->first_contact_local,
				  aclc->hdr.version);
	if (rc)
		goto connect_abort;
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_conn_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_server_lgr_pending);
	smc->connect_nonblock = 0;

	return rc;
}
/* check if received accept type and version matches a proposed one */
static int smc_connect_check_aclc(struct smc_init_info *ini,
				  struct smc_clc_msg_accept_confirm *aclc)
{
	if ((aclc->hdr.typev1 == SMC_TYPE_R &&
	     !smcr_indicated(ini->smc_type_v1)) ||
	    (aclc->hdr.typev1 == SMC_TYPE_D &&
	     ((!smcd_indicated(ini->smc_type_v1) &&
	       !smcd_indicated(ini->smc_type_v2)) ||
	      (aclc->hdr.version == SMC_V1 &&
	       !smcd_indicated(ini->smc_type_v1)) ||
	      (aclc->hdr.version == SMC_V2 &&
	       !smcd_indicated(ini->smc_type_v2)))))
		return SMC_CLC_DECL_MODEUNSUPP;

	return 0;
}
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
	struct smc_clc_msg_accept_confirm_v2 *aclc2;
	struct smc_clc_msg_accept_confirm *aclc;
	struct smc_init_info *ini = NULL;
	u8 *buf = NULL;
	int rc = 0;

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
						    version);

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
						    version);

	ini->smcd_version = SMC_V1;
	ini->smcd_version |= smc_ism_is_v2_capable() ? SMC_V2 : 0;
	ini->smc_type_v1 = SMC_TYPE_B;
	ini->smc_type_v2 = smc_ism_is_v2_capable() ? SMC_TYPE_D : SMC_TYPE_N;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
		ini->smcd_version &= ~SMC_V1;
		ini->smc_type_v1 = SMC_TYPE_N;
		if (!ini->smcd_version) {
			rc = SMC_CLC_DECL_GETVLANERR;
			goto fallback;
		}
	}

	rc = smc_find_proposal_devices(smc, ini);
	if (rc)
		goto fallback;

	buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto fallback;
	}
	aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
	aclc = (struct smc_clc_msg_accept_confirm *)aclc2;

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, aclc2, ini);
	if (rc)
		goto vlan_cleanup;

	/* check if smc modes and versions of CLC proposal and accept match */
	rc = smc_connect_check_aclc(ini, aclc);
	version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
	ini->smcd_version = version;
	if (rc)
		goto vlan_cleanup;

	/* depending on previous steps, connect using rdma or ism */
	if (aclc->hdr.typev1 == SMC_TYPE_R)
		rc = smc_connect_rdma(smc, aclc, ini);
	else if (aclc->hdr.typev1 == SMC_TYPE_D)
		rc = smc_connect_ism(smc, aclc, ini);
	if (rc)
		goto vlan_cleanup;

	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
	kfree(ini);
	return 0;

vlan_cleanup:
	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
fallback:
	kfree(ini);
	return smc_connect_decline_fallback(smc, rc, version);
}
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
		     (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		sock_put(&smc->sk); /* passive closing */
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;

	sock_hold(&smc->sk); /* sock put in passive closing */
	if (smc->use_fallback)
		goto out;
	if (flags & O_NONBLOCK) {
		if (queue_work(smc_hs_wq, &smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}
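/* For O_NONBLOCK connects the CLC handshake cannot run in the caller's
 * context, so it is deferred to smc_connect_work() on smc_hs_wq;
 * connect_nonblock marks the socket until that worker has finished.
 */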
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0 && rc != -EAGAIN)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	/* new clcsock has inherited the smc listen-specific sk_data_ready
	 * function; switch it back to the original sk_data_ready function
	 */
	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink () */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}
/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}
/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}
/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk); /* sock_put below */
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
}
static int smcr_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* initial contact - try to establish second link */
	smc_llc_srv_add_link(link);
	return 0;
}
/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}
/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}
/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;

	smc_listen_out(new_smc);
}
/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_first, u8 version)
{
	/* RDMA setup failed, switch back to TCP */
	smc_conn_abort(new_smc, local_first);
	if (reason_code < 0) { /* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	smc_switch_to_fallback(new_smc);
	new_smc->fallback_rsn = reason_code;
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}
/* listen worker: version checking */
static int smc_listen_v2_check(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
	struct smc_clc_v2_extension *pclc_v2_ext;
	int rc = SMC_CLC_DECL_PEERNOSMC;

	ini->smc_type_v1 = pclc->hdr.typev1;
	ini->smc_type_v2 = pclc->hdr.typev2;
	ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0;
	if (pclc->hdr.version > SMC_V1)
		ini->smcd_version |=
				ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
	if (!(ini->smcd_version & SMC_V2)) {
		rc = SMC_CLC_DECL_PEERNOSMC;
		goto out;
	}
	if (!smc_ism_is_v2_capable()) {
		ini->smcd_version &= ~SMC_V2;
		rc = SMC_CLC_DECL_NOISM2SUPP;
		goto out;
	}
	pclc_v2_ext = smc_get_clc_v2_ext(pclc);
	if (!pclc_v2_ext) {
		ini->smcd_version &= ~SMC_V2;
		rc = SMC_CLC_DECL_NOV2EXT;
		goto out;
	}
	pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
	if (!pclc_smcd_v2_ext) {
		ini->smcd_version &= ~SMC_V2;
		rc = SMC_CLC_DECL_NOV2DEXT;
	}

out:
	if (!ini->smcd_version)
		return rc;

	return 0;
}
/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	if (pclc->hdr.typev1 == SMC_TYPE_N)
		return 0;
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}
/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}
/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_init_info *ini)
{
	int rc;

	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Create send and receive buffers */
	rc = smc_buf_create(new_smc, true);
	if (rc) {
		smc_conn_abort(new_smc, ini->first_contact_local);
		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
					 SMC_CLC_DECL_MEM;
	}

	return 0;
}
*smcd
,
1439 struct smc_init_info
*ini
,
1444 for (i
= 0; i
< matches
; i
++)
1445 if (smcd
== ini
->ism_dev
[i
])
/* check for ISM devices matching proposed ISM devices */
static void smc_check_ism_v2_match(struct smc_init_info *ini,
				   u16 proposed_chid, u64 proposed_gid,
				   unsigned int *matches)
{
	struct smcd_dev *smcd;

	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away)
			continue;
		if (smc_is_already_selected(smcd, ini, *matches))
			continue;
		if (smc_ism_get_chid(smcd) == proposed_chid &&
		    !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
			ini->ism_peer_gid[*matches] = proposed_gid;
			ini->ism_dev[*matches] = smcd;
			(*matches)++;
			break;
		}
	}
}
static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
{
	if (!ini->rc)
		ini->rc = rc;
}
static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_v2_extension *smc_v2_ext;
	struct smc_clc_msg_smcd *pclc_smcd;
	unsigned int matches = 0;
	u8 smcd_version;
	u8 *eid = NULL;
	int i, rc;

	if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
		goto not_found;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	smc_v2_ext = smc_get_clc_v2_ext(pclc);
	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
	if (!smcd_v2_ext ||
	    !smc_v2_ext->hdr.flag.seid) { /* no system EID support for SMCD */
		smc_find_ism_store_rc(SMC_CLC_DECL_NOSEID, ini);
		goto not_found;
	}

	mutex_lock(&smcd_dev_list.mutex);
	if (pclc_smcd->ism.chid)
		/* check for ISM device matching proposed native ISM device */
		smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
				       ntohll(pclc_smcd->ism.gid), &matches);
	for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) {
		/* check for ISM devices matching proposed non-native ISM
		 * devices
		 */
		smc_check_ism_v2_match(ini,
				       ntohs(smcd_v2_ext->gidchid[i - 1].chid),
				       ntohll(smcd_v2_ext->gidchid[i - 1].gid),
				       &matches);
	}
	mutex_unlock(&smcd_dev_list.mutex);

	if (ini->ism_dev[0]) {
		smc_ism_get_system_eid(ini->ism_dev[0], &eid);
		if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN))
			goto not_found;
	} else {
		goto not_found;
	}

	/* separate - outside the smcd_dev_list.lock */
	smcd_version = ini->smcd_version;
	for (i = 0; i < matches; i++) {
		ini->smcd_version = SMC_V2;
		ini->is_smcd = true;
		ini->ism_selected = i;
		rc = smc_listen_ism_init(new_smc, ini);
		if (rc) {
			smc_find_ism_store_rc(rc, ini);
			/* try next active ISM device */
			continue;
		}
		return; /* matching and usable V2 ISM device found */
	}
	/* no V2 ISM device could be initialized */
	ini->smcd_version = smcd_version;	/* restore original value */

not_found:
	ini->smcd_version &= ~SMC_V2;
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}
static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
	int rc = 0;

	/* check if ISM V1 is available */
	if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
		goto not_found;
	ini->is_smcd = true; /* prepare ISM check */
	ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
	rc = smc_find_ism_device(new_smc, ini);
	if (rc)
		goto not_found;
	ini->ism_selected = 0;
	rc = smc_listen_ism_init(new_smc, ini);
	if (!rc)
		return;		/* V1 ISM device found */

not_found:
	smc_find_ism_store_rc(rc, ini);
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}
/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
{
	struct smc_connection *conn = &new_smc->conn;

	if (!local_first) {
		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}
static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	int rc;

	if (!smcr_indicated(ini->smc_type_v1))
		return SMC_CLC_DECL_NOSMCDEV;

	/* prepare RDMA check */
	ini->ib_lcl = &pclc->lcl;
	rc = smc_find_rdma_device(new_smc, ini);
	if (rc) {
		/* no RDMA device found */
		if (ini->smc_type_v1 == SMC_TYPE_B)
			/* neither ISM nor RDMA device found */
			rc = SMC_CLC_DECL_NOSMCDEV;
		return rc;
	}
	rc = smc_listen_rdma_init(new_smc, ini);
	if (rc)
		return rc;
	return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
}
/* determine the local device matching to proposal */
static int smc_listen_find_device(struct smc_sock *new_smc,
				  struct smc_clc_msg_proposal *pclc,
				  struct smc_init_info *ini)
{
	int rc;

	/* check for ISM device matching V2 proposed device */
	smc_find_ism_v2_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (!(ini->smcd_version & SMC_V1))
		return ini->rc ?: SMC_CLC_DECL_NOSMCD2DEV;

	/* check for matching IP prefix and subnet length */
	rc = smc_listen_prfx_check(new_smc, pclc);
	if (rc)
		return ini->rc ?: rc;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
		return ini->rc ?: SMC_CLC_DECL_GETVLANERR;

	/* check for ISM device matching V1 proposed device */
	smc_find_ism_v1_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (pclc->hdr.typev1 == SMC_TYPE_D)
		/* skip RDMA and decline */
		return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;

	/* check if RDMA is available */
	rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
	smc_find_ism_store_rc(rc, ini);

	return (!rc) ? 0 : ini->rc;
}
/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  bool local_first)
{
	struct smc_link *link = new_smc->conn.lnk;
	int reason_code = 0;

	if (local_first)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
		return SMC_CLC_DECL_ERR_RTOK;

	if (local_first) {
		if (smc_ib_ready_link(link))
			return SMC_CLC_DECL_ERR_RDYLNK;
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_serv_conf_first_link(new_smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
	}
	return reason_code;
}
/* setup for connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm *cclc;
	struct smc_clc_msg_proposal_area *buf;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info *ini = NULL;
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		smc_switch_to_fallback(new_smc);
		new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
		smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}
	pclc = (struct smc_clc_msg_proposal *)buf;
	rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;
	version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version;

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}

	/* initial version checking */
	rc = smc_listen_v2_check(new_smc, pclc, ini);
	if (rc)
		goto out_decl;

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* determine ISM or RoCE device used for connection */
	rc = smc_listen_find_device(new_smc, pclc, ini);
	if (rc)
		goto out_unlock;

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
				 ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ini->is_smcd)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	memset(buf, 0, sizeof(*buf));
	cclc = (struct smc_clc_msg_accept_confirm *)buf;
	rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ini->is_smcd)
			goto out_unlock;
		goto out_decl;
	}

	/* finish worker */
	if (!ini->is_smcd) {
		rc = smc_listen_rdma_finish(new_smc, cclc,
					    ini->first_contact_local);
		if (rc)
			goto out_unlock;
		mutex_unlock(&smc_server_lgr_pending);
	}
	smc_conn_save_peer_info(new_smc, cclc);
	smc_listen_out_connected(new_smc);
	goto out_free;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
			   version);
out_free:
	kfree(ini);
	kfree(buf);
}
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc) /* clcsock accept queue empty or error */
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
}
static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
	struct smc_sock *lsmc;

	lsmc = (struct smc_sock *)
	       ((uintptr_t)listen_clcsock->sk_user_data & ~SK_USER_DATA_NOCOPY);
	if (!lsmc)
		return;
	lsmc->clcsk_data_ready(listen_clcsock);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
		if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
			sock_put(&lsmc->sk);
	}
}
static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
	    smc->connect_nonblock)
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	/* save original sk_data_ready function and establish
	 * smc-specific sk_data_ready function
	 */
	smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
	smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
	smc->clcsock->sk->sk_user_data =
		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;

out:
	release_sock(sk);
	return rc;
}
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
								MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}
static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}
static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}
static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}
static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}
static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work()*/
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}
static int smc_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	if (unlikely(!smc->clcsock->ops->setsockopt))
		rc = -EOPNOTSUPP;
	else
		rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
						   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk->sk_error_report(sk);
	}

	if (optlen < sizeof(int))
		return -EINVAL;
	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	lock_sock(sk);
	if (rc || smc->use_fallback)
		goto out;
	switch (optname) {
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (val)
				mod_delayed_work(smc->conn.lgr->tx_wq,
						 &smc->conn.tx_work, 0);
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (!val)
				mod_delayed_work(smc->conn.lgr->tx_wq,
						 &smc->conn.tx_work, 0);
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
out:
	release_sock(sk);

	return rc;
}
static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	if (unlikely(!smc->clcsock->ops->getsockopt))
		return -EOPNOTSUPP;
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
*sock
, unsigned int cmd
,
2248 union smc_host_cursor cons
, urg
;
2249 struct smc_connection
*conn
;
2250 struct smc_sock
*smc
;
2253 smc
= smc_sk(sock
->sk
);
2255 lock_sock(&smc
->sk
);
2256 if (smc
->use_fallback
) {
2257 if (!smc
->clcsock
) {
2258 release_sock(&smc
->sk
);
2261 answ
= smc
->clcsock
->ops
->ioctl(smc
->clcsock
, cmd
, arg
);
2262 release_sock(&smc
->sk
);
2266 case SIOCINQ
: /* same as FIONREAD */
2267 if (smc
->sk
.sk_state
== SMC_LISTEN
) {
2268 release_sock(&smc
->sk
);
2271 if (smc
->sk
.sk_state
== SMC_INIT
||
2272 smc
->sk
.sk_state
== SMC_CLOSED
)
2275 answ
= atomic_read(&smc
->conn
.bytes_to_rcv
);
2278 /* output queue size (not send + not acked) */
2279 if (smc
->sk
.sk_state
== SMC_LISTEN
) {
2280 release_sock(&smc
->sk
);
2283 if (smc
->sk
.sk_state
== SMC_INIT
||
2284 smc
->sk
.sk_state
== SMC_CLOSED
)
2287 answ
= smc
->conn
.sndbuf_desc
->len
-
2288 atomic_read(&smc
->conn
.sndbuf_space
);
2291 /* output queue size (not send only) */
2292 if (smc
->sk
.sk_state
== SMC_LISTEN
) {
2293 release_sock(&smc
->sk
);
2296 if (smc
->sk
.sk_state
== SMC_INIT
||
2297 smc
->sk
.sk_state
== SMC_CLOSED
)
2300 answ
= smc_tx_prepared_sends(&smc
->conn
);
2303 if (smc
->sk
.sk_state
== SMC_LISTEN
) {
2304 release_sock(&smc
->sk
);
2307 if (smc
->sk
.sk_state
== SMC_INIT
||
2308 smc
->sk
.sk_state
== SMC_CLOSED
) {
2311 smc_curs_copy(&cons
, &conn
->local_tx_ctrl
.cons
, conn
);
2312 smc_curs_copy(&urg
, &conn
->urg_curs
, conn
);
2313 answ
= smc_curs_diff(conn
->rmb_desc
->len
,
2318 release_sock(&smc
->sk
);
2319 return -ENOIOCTLCMD
;
2321 release_sock(&smc
->sk
);
2323 return put_user(answ
, (int __user
*)arg
);
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	return rc;
}
/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}
out:
	release_sock(sk);

	return rc;
}
/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}
static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

unsigned int smc_net_id;
static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};
static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	smc_ism_init();
	smc_clc_init();

	rc = smc_nl_init();
	if (rc)
		goto out_pernet_subsys;

	rc = smc_pnet_init();
	if (rc)
		goto out_nl;

	rc = -ENOMEM;
	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
	if (!smc_hs_wq)
		goto out_pnet;

	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
	if (!smc_close_wq)
		goto out_alloc_hs_wq;

	rc = smc_core_init();
	if (rc) {
		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
		goto out_alloc_wqs;
	}

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_core:
	smc_core_exit();
out_alloc_wqs:
	destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
	destroy_workqueue(smc_hs_wq);
out_pnet:
	smc_pnet_exit();
out_nl:
	smc_nl_exit();
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}
static void __exit smc_exit(void)
{
	static_branch_disable(&tcp_have_smc);
	sock_unregister(PF_SMC);
	smc_core_exit();
	smc_ib_unregister_client();
	destroy_workqueue(smc_close_wq);
	destroy_workqueue(smc_hs_wq);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	smc_nl_exit();
	unregister_pernet_subsys(&smc_net_ops);
	rcu_barrier();
}
module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);
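
/* Illustrative userspace usage (not part of this file): an AF_SMC socket is
 * a drop-in replacement for a TCP socket, differing only in family and
 * protocol; if the peer or the hardware cannot do SMC, the connection
 * transparently falls back to TCP via the internal clcsock.
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);  // SMCPROTO_SMC6 for IPv6
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));   // sockaddr_in as for TCP
 */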