// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * CLC (connection layer control) handshake over initial TCP socket to
 * prepare for RDMA traffic
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/in.h>
#include <linux/inetdevice.h>
#include <linux/if_ether.h>
#include <linux/sched/signal.h>
#include <linux/utsname.h>
#include <linux/ctype.h>

#include <net/addrconf.h>
#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_netlink.h"
#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
#define SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 78
#define SMCR_CLC_ACCEPT_CONFIRM_LEN_V2 108
#define SMC_CLC_RECV_BUF_LEN 100
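/* The *_ACCEPT_CONFIRM_LEN values are the fixed on-the-wire sizes of the
 * SMCv1 accept/confirm messages (68 bytes for SMC-R, 48 bytes for SMC-D);
 * the *_V2 values are the minimum sizes of their SMCv2 counterparts, which
 * may grow by a first-contact extension and GID list. SMC_CLC_RECV_BUF_LEN
 * is the chunk size used by smc_clc_wait_msg() to drain proposal bytes that
 * exceed the caller's receive buffer.
 */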
/* eye catcher "SMCR" EBCDIC for CLC messages */
static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
/* eye catcher "SMCD" EBCDIC for CLC messages */
static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
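/* In EBCDIC, 0xe2/0xd4/0xc3 encode 'S'/'M'/'C' and 0xd9/0xc4 encode 'R'/'D',
 * so the byte arrays above spell "SMCR" and "SMCD" on the wire.
 */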
static u8 smc_hostname[SMC_MAX_HOSTNAME_LEN];
struct smc_clc_eid_table {
	rwlock_t lock;
	struct list_head list;
	u8 ueid_cnt;
	u8 seid_enabled;
};

static struct smc_clc_eid_table smc_clc_eid_table;
struct smc_clc_eid_entry {
	struct list_head list;
	u8 eid[SMC_MAX_EID_LEN];
};
/* The size of a user EID is 32 characters.
 * Valid characters should be (single-byte character set) A-Z, 0-9, '.' and '-'.
 * Blanks should only be used to pad to the expected size.
 * First character must be alphanumeric.
 */
static bool smc_clc_ueid_valid(char *ueid)
{
	char *end = ueid + SMC_MAX_EID_LEN;

	while (--end >= ueid && isspace(*end))
		;
	if (end < ueid)
		return false;
	if (!isalnum(*ueid) || islower(*ueid))
		return false;
	while (ueid <= end) {
		if ((!isalnum(*ueid) || islower(*ueid)) && *ueid != '.' &&
		    *ueid != '-')
			return false;
		ueid++;
	}
	return true;
}
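/* For illustration (hypothetical values): "SAP.CLUSTER-1" blank-padded to
 * 32 characters passes the checks above, while "sap.cluster-1" (lowercase)
 * or an EID whose first character is '-' is rejected.
 */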
static int smc_clc_ueid_add(char *ueid)
{
	struct smc_clc_eid_entry *new_ueid, *tmp_ueid;
	int rc;

	if (!smc_clc_ueid_valid(ueid))
		return -EINVAL;

	/* add a new ueid entry to the ueid table if there isn't one */
	new_ueid = kzalloc(sizeof(*new_ueid), GFP_KERNEL);
	if (!new_ueid)
		return -ENOMEM;
	memcpy(new_ueid->eid, ueid, SMC_MAX_EID_LEN);

	write_lock(&smc_clc_eid_table.lock);
	if (smc_clc_eid_table.ueid_cnt >= SMC_MAX_UEID) {
		rc = -ERANGE;
		goto err_out;
	}
	list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) {
		if (!memcmp(tmp_ueid->eid, ueid, SMC_MAX_EID_LEN)) {
			rc = -EEXIST;
			goto err_out;
		}
	}
	list_add_tail(&new_ueid->list, &smc_clc_eid_table.list);
	smc_clc_eid_table.ueid_cnt++;
	write_unlock(&smc_clc_eid_table.lock);
	return 0;

err_out:
	write_unlock(&smc_clc_eid_table.lock);
	kfree(new_ueid);
	return rc;
}
int smc_clc_ueid_count(void)
{
	int count;

	read_lock(&smc_clc_eid_table.lock);
	count = smc_clc_eid_table.ueid_cnt;
	read_unlock(&smc_clc_eid_table.lock);

	return count;
}
int smc_nl_add_ueid(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY];
	char *ueid;

	if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1)
		return -EINVAL;
	ueid = (char *)nla_data(nla_ueid);

	return smc_clc_ueid_add(ueid);
}
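/* The generic netlink handlers here expect SMC_NLA_EID_TABLE_ENTRY to carry
 * the EID as a NUL-terminated string of exactly SMC_MAX_EID_LEN + 1 bytes;
 * shorter EIDs are blank-padded by user space. These requests are typically
 * issued by SMC management utilities (for example the smc-tools package),
 * not by the kernel itself.
 */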
/* remove one or all ueid entries from the table */
static int smc_clc_ueid_remove(char *ueid)
{
	struct smc_clc_eid_entry *lst_ueid, *tmp_ueid;
	int rc = -ENOENT;

	/* remove table entry */
	write_lock(&smc_clc_eid_table.lock);
	list_for_each_entry_safe(lst_ueid, tmp_ueid, &smc_clc_eid_table.list,
				 list) {
		if (!ueid || !memcmp(lst_ueid->eid, ueid, SMC_MAX_EID_LEN)) {
			list_del(&lst_ueid->list);
			smc_clc_eid_table.ueid_cnt--;
			kfree(lst_ueid);
			rc = 0;
		}
	}
#if IS_ENABLED(CONFIG_S390)
	if (!rc && !smc_clc_eid_table.ueid_cnt) {
		smc_clc_eid_table.seid_enabled = 1;
		rc = -EAGAIN; /* indicate success and enabling of seid */
	}
#endif
	write_unlock(&smc_clc_eid_table.lock);
	return rc;
}
int smc_nl_remove_ueid(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY];
	char *ueid;

	if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1)
		return -EINVAL;
	ueid = (char *)nla_data(nla_ueid);

	return smc_clc_ueid_remove(ueid);
}
int smc_nl_flush_ueid(struct sk_buff *skb, struct genl_info *info)
{
	smc_clc_ueid_remove(NULL);
	return 0;
}
static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq,
				u32 flags, char *ueid)
{
	char ueid_str[SMC_MAX_EID_LEN + 1];
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &smc_gen_nl_family,
			  flags, SMC_NETLINK_DUMP_UEID);
	if (!hdr)
		return -ENOMEM;
	memcpy(ueid_str, ueid, SMC_MAX_EID_LEN);
	ueid_str[SMC_MAX_EID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) {
		genlmsg_cancel(skb, hdr);
		return -EMSGSIZE;
	}
	genlmsg_end(skb, hdr);
	return 0;
}
static int _smc_nl_ueid_dump(struct sk_buff *skb, u32 portid, u32 seq,
			     int start_idx)
{
	struct smc_clc_eid_entry *lst_ueid;
	int idx = 0;

	read_lock(&smc_clc_eid_table.lock);
	list_for_each_entry(lst_ueid, &smc_clc_eid_table.list, list) {
		if (idx++ < start_idx)
			continue;
		if (smc_nl_ueid_dumpinfo(skb, portid, seq, NLM_F_MULTI,
					 lst_ueid->eid)) {
			--idx;
			break;
		}
	}
	read_unlock(&smc_clc_eid_table.lock);
	return idx;
}
int smc_nl_dump_ueid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	int idx;

	idx = _smc_nl_ueid_dump(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, cb_ctx->pos[0]);

	cb_ctx->pos[0] = idx;
	return skb->len;
}
int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	char seid_str[SMC_MAX_EID_LEN + 1];
	u8 seid_enabled;
	void *hdr;
	u8 *seid;

	if (cb_ctx->pos[0])
		return skb->len;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_DUMP_SEID);
	if (!hdr)
		return -ENOMEM;
	if (!smc_ism_is_v2_capable())
		goto end;

	smc_ism_get_system_eid(&seid);
	memcpy(seid_str, seid, SMC_MAX_EID_LEN);
	seid_str[SMC_MAX_EID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str))
		goto err;
	read_lock(&smc_clc_eid_table.lock);
	seid_enabled = smc_clc_eid_table.seid_enabled;
	read_unlock(&smc_clc_eid_table.lock);
	if (nla_put_u8(skb, SMC_NLA_SEID_ENABLED, seid_enabled))
		goto err;
end:
	genlmsg_end(skb, hdr);
	cb_ctx->pos[0] = 1;
	return skb->len;
err:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
int smc_nl_enable_seid(struct sk_buff *skb, struct genl_info *info)
{
#if IS_ENABLED(CONFIG_S390)
	write_lock(&smc_clc_eid_table.lock);
	smc_clc_eid_table.seid_enabled = 1;
	write_unlock(&smc_clc_eid_table.lock);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
int smc_nl_disable_seid(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;

#if IS_ENABLED(CONFIG_S390)
	write_lock(&smc_clc_eid_table.lock);
	if (!smc_clc_eid_table.ueid_cnt)
		rc = -ENOENT;
	else
		smc_clc_eid_table.seid_enabled = 0;
	write_unlock(&smc_clc_eid_table.lock);
#else
	rc = -EOPNOTSUPP;
#endif
	return rc;
}
static bool _smc_clc_match_ueid(u8 *peer_ueid)
{
	struct smc_clc_eid_entry *tmp_ueid;

	list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) {
		if (!memcmp(tmp_ueid->eid, peer_ueid, SMC_MAX_EID_LEN))
			return true;
	}
	return false;
}
bool smc_clc_match_eid(u8 *negotiated_eid,
		       struct smc_clc_v2_extension *smc_v2_ext,
		       u8 *peer_eid, u8 *local_eid)
{
	bool match = false;
	int i;

	negotiated_eid[0] = 0;
	read_lock(&smc_clc_eid_table.lock);
	if (peer_eid && local_eid &&
	    smc_clc_eid_table.seid_enabled &&
	    smc_v2_ext->hdr.flag.seid &&
	    !memcmp(peer_eid, local_eid, SMC_MAX_EID_LEN)) {
		memcpy(negotiated_eid, peer_eid, SMC_MAX_EID_LEN);
		match = true;
		goto out;
	}

	for (i = 0; i < smc_v2_ext->hdr.eid_cnt; i++) {
		if (_smc_clc_match_ueid(smc_v2_ext->user_eids[i])) {
			memcpy(negotiated_eid, smc_v2_ext->user_eids[i],
			       SMC_MAX_EID_LEN);
			match = true;
			break;
		}
	}
out:
	read_unlock(&smc_clc_eid_table.lock);
	return match;
}
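/* EID negotiation thus prefers the system EID: if both sides have SEID
 * support enabled and the peer's SEID equals the local one, that value is
 * taken as the negotiated EID. Otherwise the first user EID proposed by the
 * peer that is also present in the local table wins; if nothing matches,
 * negotiated_eid stays empty and the match fails.
 */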
/* check arriving CLC proposal */
static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_msg_hdr *hdr = &pclc->hdr;
	struct smc_clc_v2_extension *v2_ext;

	v2_ext = smc_get_clc_v2_ext(pclc);
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (hdr->version == SMC_V1) {
		if (hdr->typev1 == SMC_TYPE_N)
			return false;
		if (ntohs(hdr->length) !=
			sizeof(*pclc) + ntohs(pclc->iparea_offset) +
			sizeof(*pclc_prfx) +
			pclc_prfx->ipv6_prefixes_cnt *
				sizeof(struct smc_clc_ipv6_prefix) +
			sizeof(struct smc_clc_msg_trail))
			return false;
	} else {
		if (ntohs(hdr->length) !=
			sizeof(*pclc) +
			sizeof(struct smc_clc_msg_smcd) +
			(hdr->typev1 != SMC_TYPE_N ?
				sizeof(*pclc_prfx) +
				pclc_prfx->ipv6_prefixes_cnt *
				sizeof(struct smc_clc_ipv6_prefix) : 0) +
			(hdr->typev2 != SMC_TYPE_N ?
				sizeof(*v2_ext) +
				v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN : 0) +
			(smcd_indicated(hdr->typev2) ?
				sizeof(*smcd_v2_ext) + v2_ext->hdr.ism_gid_cnt *
					sizeof(struct smc_clc_smcd_gid_chid) :
				0) +
			sizeof(struct smc_clc_msg_trail))
			return false;
	}
	return true;
}
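/* The V2 length check above mirrors the proposal layout on the wire:
 * fixed proposal header, SMC-D info area, an optional V1 IP prefix area
 * (base prefix plus the announced IPv6 prefixes), an optional V2 extension
 * carrying the user EIDs, an optional SMC-D V2 extension with its ISM
 * GID/CHID entries, and the trailing eyecatcher.
 */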
/* check arriving CLC accept or confirm */
static bool
smc_clc_msg_acc_conf_valid(struct smc_clc_msg_accept_confirm *clc)
{
	struct smc_clc_msg_hdr *hdr = &clc->hdr;

	if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D)
		return false;
	if (hdr->version == SMC_V1) {
		if ((hdr->typev1 == SMC_TYPE_R &&
		     ntohs(hdr->length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
		    (hdr->typev1 == SMC_TYPE_D &&
		     ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
			return false;
	} else {
		if (hdr->typev1 == SMC_TYPE_D &&
		    ntohs(hdr->length) < SMCD_CLC_ACCEPT_CONFIRM_LEN_V2)
			return false;
		if (hdr->typev1 == SMC_TYPE_R &&
		    ntohs(hdr->length) < SMCR_CLC_ACCEPT_CONFIRM_LEN_V2)
			return false;
	}
	return true;
}
/* check arriving CLC decline */
static bool
smc_clc_msg_decl_valid(struct smc_clc_msg_decline *dclc)
{
	struct smc_clc_msg_hdr *hdr = &dclc->hdr;

	if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D)
		return false;
	if (hdr->version == SMC_V1) {
		if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline))
			return false;
	} else {
		if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline_v2))
			return false;
	}
	return true;
}
static int smc_clc_fill_fce_v2x(struct smc_clc_first_contact_ext_v2x *fce_v2x,
				struct smc_init_info *ini)
{
	int ret = sizeof(*fce_v2x);

	memset(fce_v2x, 0, sizeof(*fce_v2x));
	fce_v2x->fce_v2_base.os_type = SMC_CLC_OS_LINUX;
	fce_v2x->fce_v2_base.release = ini->release_nr;
	memcpy(fce_v2x->fce_v2_base.hostname,
	       smc_hostname, sizeof(smc_hostname));
	if (ini->is_smcd && ini->release_nr < SMC_RELEASE_1) {
		ret = sizeof(struct smc_clc_first_contact_ext);
		goto out;
	}

	if (ini->release_nr >= SMC_RELEASE_1) {
		if (!ini->is_smcd) {
			fce_v2x->max_conns = ini->max_conns;
			fce_v2x->max_links = ini->max_links;
		}
		fce_v2x->feature_mask = htons(ini->feature_mask);
	}

out:
	return ret;
}
/* check if received message has a correct header length and contains valid
 * heading and trailing eyecatchers
 */
static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
{
	struct smc_clc_msg_accept_confirm *clc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_clc_msg_decline *dclc;
	struct smc_clc_msg_trail *trl;

	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	switch (clcm->type) {
	case SMC_CLC_PROPOSAL:
		pclc = (struct smc_clc_msg_proposal *)clcm;
		if (!smc_clc_msg_prop_valid(pclc))
			return false;
		trl = (struct smc_clc_msg_trail *)
			((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
		break;
	case SMC_CLC_ACCEPT:
	case SMC_CLC_CONFIRM:
		clc = (struct smc_clc_msg_accept_confirm *)clcm;
		if (!smc_clc_msg_acc_conf_valid(clc))
			return false;
		trl = (struct smc_clc_msg_trail *)
			((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl));
		break;
	case SMC_CLC_DECLINE:
		dclc = (struct smc_clc_msg_decline *)clcm;
		if (!smc_clc_msg_decl_valid(dclc))
			return false;
		check_trl = false;
		break;
	default:
		return false;
	}
	if (check_trl &&
	    memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	return true;
}
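/* Every CLC message follows the same framing: a common header carrying an
 * eyecatcher, type, length and version, a type-specific body, and a trailing
 * eyecatcher that must again read "SMCR" or "SMCD". For declines the trailer
 * check is skipped here (check_trl is cleared above).
 */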
/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg */
static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
				 struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dst->dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return -ENODEV;

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (!inet_ifa_match(ipv4, ifa))
			continue;
		prop->prefix_len = inet_mask_len(ifa->ifa_mask);
		prop->outgoing_subnet = ifa->ifa_address & ifa->ifa_mask;
		/* prop->ipv6_prefixes_cnt = 0; already done by memset before */
		return 0;
	}
	return -ENOENT;
}
/* fill CLC proposal msg with ipv6 prefixes from device */
static int smc_clc_prfx_set6_rcu(struct dst_entry *dst,
				 struct smc_clc_msg_proposal_prefix *prop,
				 struct smc_clc_ipv6_prefix *ipv6_prfx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dst->dev);
	struct inet6_ifaddr *ifa;
	int cnt = 0;

	if (!in6_dev)
		return -ENODEV;
	/* use a maximum of 8 IPv6 prefixes from device */
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		ipv6_addr_prefix(&ipv6_prfx[cnt].prefix,
				 &ifa->addr, ifa->prefix_len);
		ipv6_prfx[cnt].prefix_len = ifa->prefix_len;
		cnt++;
		if (cnt == SMC_CLC_MAX_V6_PREFIX)
			break;
	}
	prop->ipv6_prefixes_cnt = cnt;
	if (cnt)
		return 0;
#endif
	return -ENOENT;
}
/* retrieve and set prefixes in CLC proposal msg */
static int smc_clc_prfx_set(struct socket *clcsock,
			    struct smc_clc_msg_proposal_prefix *prop,
			    struct smc_clc_ipv6_prefix *ipv6_prfx)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct sockaddr_storage addrs;
	struct sockaddr_in6 *addr6;
	struct sockaddr_in *addr;
	int rc = -ENOENT;

	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}
	/* get address to which the internal TCP socket is bound */
	if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
		goto out_rel;
	/* analyze IP specific data of net_device belonging to TCP socket */
	addr6 = (struct sockaddr_in6 *)&addrs;
	rcu_read_lock();
	if (addrs.ss_family == PF_INET) {
		/* IPv4 */
		addr = (struct sockaddr_in *)&addrs;
		rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop);
	} else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) {
		/* mapped IPv4 address - peer is IPv4 only */
		rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3],
					   prop);
	} else {
		/* IPv6 */
		rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx);
	}
	rcu_read_unlock();
out_rel:
	dst_release(dst);
out:
	return rc;
}
/* match ipv4 addrs of dev against addr in CLC proposal */
static int smc_clc_prfx_match4_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return -ENODEV;
	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (prop->prefix_len == inet_mask_len(ifa->ifa_mask) &&
		    inet_ifa_match(prop->outgoing_subnet, ifa))
			return 0;
	}

	return -ENOENT;
}
/* match ipv6 addrs of dev against addrs in CLC proposal */
static int smc_clc_prfx_match6_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dev);
	struct smc_clc_ipv6_prefix *ipv6_prfx;
	struct inet6_ifaddr *ifa;
	int i, max;

	if (!in6_dev)
		return -ENODEV;
	/* ipv6 prefix list starts behind smc_clc_msg_proposal_prefix */
	ipv6_prfx = (struct smc_clc_ipv6_prefix *)((u8 *)prop + sizeof(*prop));
	max = min_t(u8, prop->ipv6_prefixes_cnt, SMC_CLC_MAX_V6_PREFIX);
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		for (i = 0; i < max; i++) {
			if (ifa->prefix_len == ipv6_prfx[i].prefix_len &&
			    ipv6_prefix_equal(&ifa->addr, &ipv6_prfx[i].prefix,
					      ifa->prefix_len))
				return 0;
		}
	}
#endif
	return -ENOENT;
}
/* check if proposed prefixes match one of our device prefixes */
int smc_clc_prfx_match(struct socket *clcsock,
		       struct smc_clc_msg_proposal_prefix *prop)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	int rc;

	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}
	rcu_read_lock();
	if (!prop->ipv6_prefixes_cnt)
		rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
	else
		rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
	rcu_read_unlock();
out_rel:
	dst_release(dst);
out:
	return rc;
}
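/* The prefix comparison above is what ties an SMC-R V1 handshake to the IP
 * topology: the server only proceeds if the subnet/prefix the client put
 * into its proposal matches a prefix configured on the server's own egress
 * device, i.e. both peers sit in the same IP subnet.
 */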
/* Wait for data on the tcp-socket, analyze received data
 * Returns:
 * 0 if success and it was not a decline that we received.
 * SMC_CLC_DECL_REPLY if decline received for fallback w/o another decl send.
 * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise.
 */
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
		     u8 expected_type, unsigned long timeout)
{
	long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
	struct sock *clc_sk = smc->clcsock->sk;
	struct smc_clc_msg_hdr *clcm = buf;
	struct msghdr msg = {NULL, 0};
	int reason_code = 0;
	struct kvec vec = {buf, buflen};
	int len, datlen, recvlen;
	bool check_trl = true;
	int krflags;

	/* peek the first few bytes to determine length of data to receive
	 * so we don't consume any subsequent CLC message or payload data
	 * in the TCP byte stream
	 *
	 * Caller must make sure that buflen is no less than
	 * sizeof(struct smc_clc_msg_hdr)
	 */
	krflags = MSG_PEEK | MSG_WAITALL;
	clc_sk->sk_rcvtimeo = timeout;
	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1,
		      sizeof(struct smc_clc_msg_hdr));
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (signal_pending(current)) {
		reason_code = -EINTR;
		clc_sk->sk_err = EINTR;
		smc->sk.sk_err = EINTR;
		goto out;
	}
	if (clc_sk->sk_err) {
		reason_code = -clc_sk->sk_err;
		if (clc_sk->sk_err == EAGAIN &&
		    expected_type == SMC_CLC_DECLINE)
			clc_sk->sk_err = 0; /* reset for fallback usage */
		else
			smc->sk.sk_err = clc_sk->sk_err;
		goto out;
	}
	if (!len) { /* peer has performed orderly shutdown */
		smc->sk.sk_err = ECONNRESET;
		reason_code = -ECONNRESET;
		goto out;
	}
	if (len < 0) {
		if (len != -EAGAIN || expected_type != SMC_CLC_DECLINE)
			smc->sk.sk_err = -len;
		reason_code = len;
		goto out;
	}
	datlen = ntohs(clcm->length);
	if ((len < sizeof(struct smc_clc_msg_hdr)) ||
	    (clcm->version < SMC_V1) ||
	    ((clcm->type != SMC_CLC_DECLINE) &&
	     (clcm->type != expected_type))) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}

	/* receive the complete CLC message */
	memset(&msg, 0, sizeof(struct msghdr));
	if (datlen > buflen) {
		check_trl = false;
		recvlen = buflen;
	} else {
		recvlen = datlen;
	}
	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
	krflags = MSG_WAITALL;
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}
	datlen -= len;
	while (datlen) {
		u8 tmp[SMC_CLC_RECV_BUF_LEN];

		vec.iov_base = &tmp;
		vec.iov_len = SMC_CLC_RECV_BUF_LEN;
		/* receive remaining proposal message */
		recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
				SMC_CLC_RECV_BUF_LEN : datlen;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
		len = sock_recvmsg(smc->clcsock, &msg, krflags);
		datlen -= len;
	}
	if (clcm->type == SMC_CLC_DECLINE) {
		struct smc_clc_msg_decline *dclc;

		dclc = (struct smc_clc_msg_decline *)clcm;
		reason_code = SMC_CLC_DECL_PEERDECL;
		smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
		if (((struct smc_clc_msg_decline *)buf)->hdr.typev2 &
				SMC_FIRST_CONTACT_MASK) {
			smc->conn.lgr->sync_err = 1;
			smc_lgr_terminate_sched(smc->conn.lgr);
		}
	}

out:
	clc_sk->sk_rcvtimeo = rcvtimeo;
	return reason_code;
}
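/* smc_clc_wait_msg() therefore works in two passes: it first peeks just the
 * CLC header (MSG_PEEK | MSG_WAITALL) to learn the announced message length
 * and type without consuming later bytes of the TCP stream, then receives
 * exactly that many bytes. If a proposal is larger than the caller's buffer,
 * the excess is drained in SMC_CLC_RECV_BUF_LEN chunks and the trailer check
 * is skipped, since the tail of the message is no longer in the buffer.
 */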
/* send CLC DECLINE message across internal TCP socket */
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version)
{
	struct smc_clc_msg_decline *dclc_v1;
	struct smc_clc_msg_decline_v2 dclc;
	struct msghdr msg;
	int len, send_len;
	struct kvec vec;

	dclc_v1 = (struct smc_clc_msg_decline *)&dclc;
	memset(&dclc, 0, sizeof(dclc));
	memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	dclc.hdr.type = SMC_CLC_DECLINE;
	dclc.hdr.version = version;
	dclc.os_type = version == SMC_V1 ? 0 : SMC_CLC_OS_LINUX;
	dclc.hdr.typev2 = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ?
				SMC_FIRST_CONTACT_MASK : 0;
	if ((!smc_conn_lgr_valid(&smc->conn) || !smc->conn.lgr->is_smcd) &&
	    smc_ib_is_valid_local_systemid())
		memcpy(dclc.id_for_peer, local_systemid,
		       sizeof(local_systemid));
	dclc.peer_diagnosis = htonl(peer_diag_info);
	if (version == SMC_V1) {
		memcpy(dclc_v1->trl.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
		send_len = sizeof(*dclc_v1);
	} else {
		memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
		send_len = sizeof(dclc);
	}
	dclc.hdr.length = htons(send_len);

	memset(&msg, 0, sizeof(msg));
	vec.iov_base = &dclc;
	vec.iov_len = send_len;
	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, send_len);
	if (len < 0 || len < send_len)
		len = -EPROTO;
	return len > 0 ? 0 : len;
}
/* send CLC PROPOSAL message across internal TCP socket */
int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct smc_clc_msg_proposal *pclc_base;
	struct smc_clc_smcd_gid_chid *gidchids;
	struct smc_clc_msg_proposal_area *pclc;
	struct smc_clc_ipv6_prefix *ipv6_prfx;
	struct net *net = sock_net(&smc->sk);
	struct smc_clc_v2_extension *v2_ext;
	struct smc_clc_msg_smcd *pclc_smcd;
	struct smc_clc_msg_trail *trl;
	struct smcd_dev *smcd;
	int len, i, plen, rc;
	int reason_code = 0;
	struct kvec vec[8];
	struct msghdr msg;

	pclc = kzalloc(sizeof(*pclc), GFP_KERNEL);
	if (!pclc)
		return -ENOMEM;

	pclc_base = &pclc->pclc_base;
	pclc_smcd = &pclc->pclc_smcd;
	pclc_prfx = &pclc->pclc_prfx;
	ipv6_prfx = pclc->pclc_prfx_ipv6;
	v2_ext = container_of(&pclc->pclc_v2_ext,
			      struct smc_clc_v2_extension, fixed);
	smcd_v2_ext = container_of(&pclc->pclc_smcd_v2_ext,
				   struct smc_clc_smcd_v2_extension, fixed);
	gidchids = pclc->pclc_gidchids;
	trl = &pclc->pclc_trl;

	pclc_base->hdr.version = SMC_V2;
	pclc_base->hdr.typev1 = ini->smc_type_v1;
	pclc_base->hdr.typev2 = ini->smc_type_v2;
	plen = sizeof(*pclc_base) + sizeof(*pclc_smcd) + sizeof(*trl);

	/* retrieve ip prefixes for CLC proposal msg */
	if (ini->smc_type_v1 != SMC_TYPE_N) {
		rc = smc_clc_prfx_set(smc->clcsock, pclc_prfx, ipv6_prfx);
		if (rc) {
			if (ini->smc_type_v2 == SMC_TYPE_N) {
				kfree(pclc);
				return SMC_CLC_DECL_CNFERR;
			}
			pclc_base->hdr.typev1 = SMC_TYPE_N;
		} else {
			pclc_base->iparea_offset = htons(sizeof(*pclc_smcd));
			plen += sizeof(*pclc_prfx) +
					pclc_prfx->ipv6_prefixes_cnt *
					sizeof(ipv6_prfx[0]);
		}
	}

	/* build SMC Proposal CLC message */
	memcpy(pclc_base->hdr.eyecatcher, SMC_EYECATCHER,
	       sizeof(SMC_EYECATCHER));
	pclc_base->hdr.type = SMC_CLC_PROPOSAL;
	if (smcr_indicated(ini->smc_type_v1)) {
		/* add SMC-R specifics */
		memcpy(pclc_base->lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(pclc_base->lcl.gid, ini->ib_gid, SMC_GID_SIZE);
		memcpy(pclc_base->lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1],
		       ETH_ALEN);
	}
	if (smcd_indicated(ini->smc_type_v1)) {
		struct smcd_gid smcd_gid;

		/* add SMC-D specifics */
		if (ini->ism_dev[0]) {
			smcd = ini->ism_dev[0];
			smcd->ops->get_local_gid(smcd, &smcd_gid);
			pclc_smcd->ism.gid = htonll(smcd_gid.gid);
			pclc_smcd->ism.chid =
				htons(smc_ism_get_chid(ini->ism_dev[0]));
		}
	}
	if (ini->smc_type_v2 == SMC_TYPE_N) {
		pclc_smcd->v2_ext_offset = 0;
	} else {
		struct smc_clc_eid_entry *ueident;
		u16 v2_ext_offset;

		v2_ext->hdr.flag.release = SMC_RELEASE;
		v2_ext_offset = sizeof(*pclc_smcd) -
			offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
		if (ini->smc_type_v1 != SMC_TYPE_N)
			v2_ext_offset += sizeof(*pclc_prfx) +
						pclc_prfx->ipv6_prefixes_cnt *
						sizeof(ipv6_prfx[0]);
		pclc_smcd->v2_ext_offset = htons(v2_ext_offset);
		plen += sizeof(*v2_ext);

		v2_ext->feature_mask = htons(SMC_FEATURE_MASK);
		read_lock(&smc_clc_eid_table.lock);
		v2_ext->hdr.eid_cnt = smc_clc_eid_table.ueid_cnt;
		plen += smc_clc_eid_table.ueid_cnt * SMC_MAX_EID_LEN;
		i = 0;
		list_for_each_entry(ueident, &smc_clc_eid_table.list, list) {
			memcpy(v2_ext->user_eids[i++], ueident->eid,
			       sizeof(ueident->eid));
		}
		read_unlock(&smc_clc_eid_table.lock);
	}
	if (smcd_indicated(ini->smc_type_v2)) {
		struct smcd_gid smcd_gid;
		u8 *eid = NULL;
		int entry = 0;

		v2_ext->hdr.flag.seid = smc_clc_eid_table.seid_enabled;
		v2_ext->hdr.smcd_v2_ext_offset = htons(sizeof(*v2_ext) -
				offsetofend(struct smc_clnt_opts_area_hdr,
					    smcd_v2_ext_offset) +
				v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN);
		smc_ism_get_system_eid(&eid);
		if (eid && v2_ext->hdr.flag.seid)
			memcpy(smcd_v2_ext->system_eid, eid, SMC_MAX_EID_LEN);
		plen += sizeof(*smcd_v2_ext);
		if (ini->ism_offered_cnt) {
			for (i = 1; i <= ini->ism_offered_cnt; i++) {
				smcd = ini->ism_dev[i];
				smcd->ops->get_local_gid(smcd, &smcd_gid);
				gidchids[entry].chid =
					htons(smc_ism_get_chid(ini->ism_dev[i]));
				gidchids[entry].gid = htonll(smcd_gid.gid);
				if (smc_ism_is_emulated(smcd)) {
					/* an Emulated-ISM device takes two
					 * entries. CHID of the second entry
					 * repeats that of the first entry.
					 */
					gidchids[entry + 1].chid =
						gidchids[entry].chid;
					gidchids[entry + 1].gid =
						htonll(smcd_gid.gid_ext);
					entry++;
				}
				entry++;
			}
			plen += entry * sizeof(struct smc_clc_smcd_gid_chid);
		}
		v2_ext->hdr.ism_gid_cnt = entry;
	}
	if (smcr_indicated(ini->smc_type_v2)) {
		memcpy(v2_ext->roce, ini->smcrv2.ib_gid_v2, SMC_GID_SIZE);
		v2_ext->max_conns = net->smc.sysctl_max_conns_per_lgr;
		v2_ext->max_links = net->smc.sysctl_max_links_per_lgr;
	}

	pclc_base->hdr.length = htons(plen);
	memcpy(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));

	/* send SMC Proposal CLC message */
	memset(&msg, 0, sizeof(msg));
	i = 0;
	vec[i].iov_base = pclc_base;
	vec[i++].iov_len = sizeof(*pclc_base);
	vec[i].iov_base = pclc_smcd;
	vec[i++].iov_len = sizeof(*pclc_smcd);
	if (ini->smc_type_v1 != SMC_TYPE_N) {
		vec[i].iov_base = pclc_prfx;
		vec[i++].iov_len = sizeof(*pclc_prfx);
		if (pclc_prfx->ipv6_prefixes_cnt > 0) {
			vec[i].iov_base = ipv6_prfx;
			vec[i++].iov_len = pclc_prfx->ipv6_prefixes_cnt *
					   sizeof(ipv6_prfx[0]);
		}
	}
	if (ini->smc_type_v2 != SMC_TYPE_N) {
		vec[i].iov_base = v2_ext;
		vec[i++].iov_len = sizeof(*v2_ext) +
				   (v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN);
		if (smcd_indicated(ini->smc_type_v2)) {
			vec[i].iov_base = smcd_v2_ext;
			vec[i++].iov_len = sizeof(*smcd_v2_ext);
			if (ini->ism_offered_cnt) {
				vec[i].iov_base = gidchids;
				vec[i++].iov_len = v2_ext->hdr.ism_gid_cnt *
					sizeof(struct smc_clc_smcd_gid_chid);
			}
		}
	}
	vec[i].iov_base = trl;
	vec[i++].iov_len = sizeof(*trl);
	/* due to the few bytes needed for clc-handshake this cannot block */
	len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
	if (len < 0) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
		reason_code = -smc->sk.sk_err;
	} else if (len < ntohs(pclc_base->hdr.length)) {
		reason_code = -ENETUNREACH;
		smc->sk.sk_err = -reason_code;
	}

	kfree(pclc);
	return reason_code;
}
static void
smcd_clc_prep_confirm_accept(struct smc_connection *conn,
			     struct smc_clc_msg_accept_confirm *clc,
			     int first_contact, u8 version,
			     u8 *eid, struct smc_init_info *ini,
			     int *fce_len,
			     struct smc_clc_first_contact_ext_v2x *fce_v2x,
			     struct smc_clc_msg_trail *trl)
{
	struct smcd_dev *smcd = conn->lgr->smcd;
	struct smcd_gid smcd_gid;
	u16 chid;
	int len;

	/* SMC-D specific settings */
	memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER,
	       sizeof(SMCD_EYECATCHER));
	smcd->ops->get_local_gid(smcd, &smcd_gid);
	clc->hdr.typev1 = SMC_TYPE_D;
	clc->d0.gid = htonll(smcd_gid.gid);
	clc->d0.token = htonll(conn->rmb_desc->token);
	clc->d0.dmbe_size = conn->rmbe_size_comp;
	clc->d0.dmbe_idx = 0;
	memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
	if (version == SMC_V1) {
		clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
	} else {
		chid = smc_ism_get_chid(smcd);
		clc->d1.chid = htons(chid);
		if (eid && eid[0])
			memcpy(clc->d1.eid, eid, SMC_MAX_EID_LEN);
		if (__smc_ism_is_emulated(chid))
			clc->d1.gid_ext = htonll(smcd_gid.gid_ext);
		len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
		if (first_contact) {
			*fce_len = smc_clc_fill_fce_v2x(fce_v2x, ini);
			len += *fce_len;
		}
		clc->hdr.length = htons(len);
	}
	memcpy(trl->eyecatcher, SMCD_EYECATCHER,
	       sizeof(SMCD_EYECATCHER));
}
static void
smcr_clc_prep_confirm_accept(struct smc_connection *conn,
			     struct smc_clc_msg_accept_confirm *clc,
			     int first_contact, u8 version,
			     u8 *eid, struct smc_init_info *ini,
			     int *fce_len,
			     struct smc_clc_first_contact_ext_v2x *fce_v2x,
			     struct smc_clc_fce_gid_ext *gle,
			     struct smc_clc_msg_trail *trl)
{
	struct smc_link *link = conn->lnk;
	int len;

	/* SMC-R specific settings */
	memcpy(clc->hdr.eyecatcher, SMC_EYECATCHER,
	       sizeof(SMC_EYECATCHER));
	clc->hdr.typev1 = SMC_TYPE_R;
	memcpy(clc->r0.lcl.id_for_peer, local_systemid,
	       sizeof(local_systemid));
	memcpy(&clc->r0.lcl.gid, link->gid, SMC_GID_SIZE);
	memcpy(&clc->r0.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
	       ETH_ALEN);
	hton24(clc->r0.qpn, link->roce_qp->qp_num);
	clc->r0.rmb_rkey =
		htonl(conn->rmb_desc->mr[link->link_idx]->rkey);
	clc->r0.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
	clc->r0.rmbe_alert_token = htonl(conn->alert_token_local);
	switch (clc->hdr.type) {
	case SMC_CLC_ACCEPT:
		clc->r0.qp_mtu = link->path_mtu;
		break;
	case SMC_CLC_CONFIRM:
		clc->r0.qp_mtu = min(link->path_mtu, link->peer_mtu);
		break;
	}
	clc->r0.rmbe_size = conn->rmbe_size_comp;
	clc->r0.rmb_dma_addr = conn->rmb_desc->is_vm ?
		cpu_to_be64((uintptr_t)conn->rmb_desc->cpu_addr) :
		cpu_to_be64((u64)sg_dma_address
			    (conn->rmb_desc->sgt[link->link_idx].sgl));
	hton24(clc->r0.psn, link->psn_initial);
	if (version == SMC_V1) {
		clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
	} else {
		if (eid && eid[0])
			memcpy(clc->r1.eid, eid, SMC_MAX_EID_LEN);
		len = SMCR_CLC_ACCEPT_CONFIRM_LEN_V2;
		if (first_contact) {
			*fce_len = smc_clc_fill_fce_v2x(fce_v2x, ini);
			len += *fce_len;
			fce_v2x->fce_v2_base.v2_direct =
					!link->lgr->uses_gateway;
			if (clc->hdr.type == SMC_CLC_CONFIRM) {
				memset(gle, 0, sizeof(*gle));
				gle->gid_cnt = ini->smcrv2.gidlist.len;
				len += sizeof(*gle);
				len += gle->gid_cnt * sizeof(gle->gid[0]);
			}
		}
		clc->hdr.length = htons(len);
	}
	memcpy(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
}
/* build and send CLC CONFIRM / ACCEPT message */
static int smc_clc_send_confirm_accept(struct smc_sock *smc,
				       struct smc_clc_msg_accept_confirm *clc,
				       int first_contact, u8 version,
				       u8 *eid, struct smc_init_info *ini)
{
	struct smc_clc_first_contact_ext_v2x fce_v2x;
	struct smc_connection *conn = &smc->conn;
	struct smc_clc_fce_gid_ext gle;
	struct smc_clc_msg_trail trl;
	struct kvec vec[5];
	struct msghdr msg;
	int i, fce_len;

	/* send SMC Confirm CLC msg */
	clc->hdr.version = version;	/* SMC version */
	if (first_contact)
		clc->hdr.typev2 |= SMC_FIRST_CONTACT_MASK;
	if (conn->lgr->is_smcd)
		smcd_clc_prep_confirm_accept(conn, clc, first_contact,
					     version, eid, ini, &fce_len,
					     &fce_v2x, &trl);
	else
		smcr_clc_prep_confirm_accept(conn, clc, first_contact,
					     version, eid, ini, &fce_len,
					     &fce_v2x, &gle, &trl);
	memset(&msg, 0, sizeof(msg));
	i = 0;
	vec[i].iov_base = clc;
	if (version > SMC_V1)
		vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ?
					SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 :
					SMCR_CLC_ACCEPT_CONFIRM_LEN_V2) -
				   sizeof(trl);
	else
		vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ?
					SMCD_CLC_ACCEPT_CONFIRM_LEN :
					SMCR_CLC_ACCEPT_CONFIRM_LEN) -
				   sizeof(trl);
	if (version > SMC_V1 && first_contact) {
		vec[i].iov_base = &fce_v2x;
		vec[i++].iov_len = fce_len;
		if (!conn->lgr->is_smcd) {
			if (clc->hdr.type == SMC_CLC_CONFIRM) {
				vec[i].iov_base = &gle;
				vec[i++].iov_len = sizeof(gle);
				vec[i].iov_base = &ini->smcrv2.gidlist.list;
				vec[i++].iov_len = gle.gid_cnt *
						   sizeof(gle.gid[0]);
			}
		}
	}
	vec[i].iov_base = &trl;
	vec[i++].iov_len = sizeof(trl);
	return kernel_sendmsg(smc->clcsock, &msg, vec, i,
			      ntohs(clc->hdr.length));
}
/* send CLC CONFIRM message across internal TCP socket */
int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
			 u8 version, u8 *eid, struct smc_init_info *ini)
{
	struct smc_clc_msg_accept_confirm cclc;
	int reason_code = 0;
	int len;

	/* send SMC Confirm CLC msg */
	memset(&cclc, 0, sizeof(cclc));
	cclc.hdr.type = SMC_CLC_CONFIRM;
	len = smc_clc_send_confirm_accept(smc, &cclc, clnt_first_contact,
					  version, eid, ini);
	if (len < ntohs(cclc.hdr.length)) {
		if (len >= 0) {
			reason_code = -ENETUNREACH;
			smc->sk.sk_err = -reason_code;
		} else {
			smc->sk.sk_err = smc->clcsock->sk->sk_err;
			reason_code = -smc->sk.sk_err;
		}
	}
	return reason_code;
}
/* send CLC ACCEPT message across internal TCP socket */
int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact,
			u8 version, u8 *negotiated_eid, struct smc_init_info *ini)
{
	struct smc_clc_msg_accept_confirm aclc;
	int len;

	memset(&aclc, 0, sizeof(aclc));
	aclc.hdr.type = SMC_CLC_ACCEPT;
	len = smc_clc_send_confirm_accept(new_smc, &aclc, srv_first_contact,
					  version, negotiated_eid, ini);
	if (len < ntohs(aclc.hdr.length))
		len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;

	return len > 0 ? 0 : len;
}
int smc_clc_srv_v2x_features_validate(struct smc_sock *smc,
				      struct smc_clc_msg_proposal *pclc,
				      struct smc_init_info *ini)
{
	struct smc_clc_v2_extension *pclc_v2_ext;
	struct net *net = sock_net(&smc->sk);

	ini->max_conns = SMC_CONN_PER_LGR_MAX;
	ini->max_links = SMC_LINKS_ADD_LNK_MAX;
	ini->feature_mask = SMC_FEATURE_MASK;

	if ((!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) ||
	    ini->release_nr < SMC_RELEASE_1)
		return 0;

	pclc_v2_ext = smc_get_clc_v2_ext(pclc);
	if (!pclc_v2_ext)
		return SMC_CLC_DECL_NOV2EXT;

	if (ini->smcr_version & SMC_V2) {
		ini->max_conns = min_t(u8, pclc_v2_ext->max_conns,
				       net->smc.sysctl_max_conns_per_lgr);
		if (ini->max_conns < SMC_CONN_PER_LGR_MIN)
			return SMC_CLC_DECL_MAXCONNERR;

		ini->max_links = min_t(u8, pclc_v2_ext->max_links,
				       net->smc.sysctl_max_links_per_lgr);
		if (ini->max_links < SMC_LINKS_ADD_LNK_MIN)
			return SMC_CLC_DECL_MAXLINKERR;
	}

	return 0;
}
int smc_clc_clnt_v2x_features_validate(struct smc_clc_first_contact_ext *fce,
				       struct smc_init_info *ini)
{
	struct smc_clc_first_contact_ext_v2x *fce_v2x =
		(struct smc_clc_first_contact_ext_v2x *)fce;

	if (ini->release_nr < SMC_RELEASE_1)
		return 0;

	if (!ini->is_smcd) {
		if (fce_v2x->max_conns < SMC_CONN_PER_LGR_MIN)
			return SMC_CLC_DECL_MAXCONNERR;
		ini->max_conns = fce_v2x->max_conns;

		if (fce_v2x->max_links > SMC_LINKS_ADD_LNK_MAX ||
		    fce_v2x->max_links < SMC_LINKS_ADD_LNK_MIN)
			return SMC_CLC_DECL_MAXLINKERR;
		ini->max_links = fce_v2x->max_links;
	}
	/* common supplemental features of server and client */
	ini->feature_mask = ntohs(fce_v2x->feature_mask) & SMC_FEATURE_MASK;

	return 0;
}
int smc_clc_v2x_features_confirm_check(struct smc_clc_msg_accept_confirm *cclc,
				       struct smc_init_info *ini)
{
	struct smc_clc_first_contact_ext *fce =
		smc_get_clc_first_contact_ext(cclc, ini->is_smcd);
	struct smc_clc_first_contact_ext_v2x *fce_v2x =
		(struct smc_clc_first_contact_ext_v2x *)fce;

	if (cclc->hdr.version == SMC_V1 ||
	    !(cclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK))
		return 0;

	if (ini->release_nr != fce->release)
		return SMC_CLC_DECL_RELEASEERR;

	if (fce->release < SMC_RELEASE_1)
		return 0;

	if (!ini->is_smcd) {
		if (fce_v2x->max_conns != ini->max_conns)
			return SMC_CLC_DECL_MAXCONNERR;
		if (fce_v2x->max_links != ini->max_links)
			return SMC_CLC_DECL_MAXLINKERR;
	}
	/* common supplemental features returned by client */
	ini->feature_mask = ntohs(fce_v2x->feature_mask);

	return 0;
}
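/* Taken together, the three v2x helpers above negotiate the release-1
 * extras: the server clamps the client's proposed max_conns/max_links
 * against its own sysctls, the client adopts the server's values from the
 * first-contact extension, and the final confirm check verifies that the
 * client echoed exactly the values the server selected, declining with
 * SMC_CLC_DECL_MAXCONNERR, SMC_CLC_DECL_MAXLINKERR or
 * SMC_CLC_DECL_RELEASEERR on mismatch.
 */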
void smc_clc_get_hostname(u8 **host)
{
	*host = &smc_hostname[0];
}
void __init smc_clc_init(void)
{
	struct new_utsname *u;

	memset(smc_hostname, _S, sizeof(smc_hostname)); /* ASCII blanks */
	u = utsname();
	memcpy(smc_hostname, u->nodename,
	       min_t(size_t, strlen(u->nodename), sizeof(smc_hostname)));

	INIT_LIST_HEAD(&smc_clc_eid_table.list);
	rwlock_init(&smc_clc_eid_table.lock);
	smc_clc_eid_table.ueid_cnt = 0;
#if IS_ENABLED(CONFIG_S390)
	smc_clc_eid_table.seid_enabled = 1;
#else
	smc_clc_eid_table.seid_enabled = 0;
#endif
}
void smc_clc_exit(void)
{
	smc_clc_ueid_remove(NULL);
}