// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Establish SMC-R as an Infiniband Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
#include "smc_netlink.h"
#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT			7 /* 7: infinite */
#define SMC_QP_RNR_RETRY			7 /* 7: infinite */
struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

u8 local_systemid[SMC_SYSTEMID_LEN];		/* unique system identifier */
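/* Move the link's RC queue pair to the INIT state: bind it to the IB port
 * and allow local and remote RDMA writes. This is the first step of the
 * INIT -> RTR -> RTS sequence used when bringing up an SMC-R link.
 */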
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}
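/* Move the QP to Ready-To-Receive: program the path MTU, the RoCE address
 * handle (source GID index, destination GID and MAC), the peer QP number
 * and the expected receive PSN. For SMC-Rv2 connections routed via a
 * gateway, the next-hop MAC and a larger hop limit are used instead of the
 * peer's own MAC.
 */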
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;
	u8 hop_lim = 1;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
		hop_lim = IPV6_DEFAULT_HOPLIMIT;
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
		memcpy(&qp_attr.ah_attr.roce.dmac, lnk->lgr->nexthop_mac,
		       sizeof(lnk->lgr->nexthop_mac));
	else
		memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
		       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}
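/* Move the QP to Ready-To-Send: set the local ack timeout, retry counters
 * and the initial send PSN, enabling the link to transmit work requests.
 */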
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}
int smc_ib_modify_qp_error(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}
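/* Bring a freshly created link to a usable state: INIT and RTR the QP, arm
 * the receive completion queue, post the initial receive work requests,
 * and, on the server side, already move the QP to RTS.
 */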
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}
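/* Determine the MAC address of the RoCE port from GID table entry 0 and
 * cache it in smcibdev->mac[] for this port.
 */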
static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);
	return rc;
}
/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
}

bool smc_ib_is_valid_local_systemid(void)
{
	return !is_zero_ether_addr(&local_systemid[2]);
}
static void smc_ib_init_local_systemid(void)
{
	get_random_bytes(&local_systemid[0], 2);
}
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
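/* Resolve the IPv4 route from saddr to daddr and report the next-hop MAC
 * address and whether the route goes via a gateway; used by SMC-Rv2 to
 * build the RoCE address handle.
 */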
int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
		      u8 nexthop_mac[], u8 *uses_gateway)
{
	struct neighbour *neigh = NULL;
	struct rtable *rt = NULL;
	struct flowi4 fl4 = {
		.saddr = saddr,
		.daddr = daddr
	};

	if (daddr == cpu_to_be32(INADDR_NONE))
		goto out;
	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto out;
	if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
		goto out_rt;
	neigh = dst_neigh_lookup(&rt->dst, &fl4.daddr);
	if (!neigh)
		goto out_rt;
	memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
	*uses_gateway = rt->rt_uses_gateway;
	neigh_release(neigh);
	ip_rt_put(rt);
	return 0;

out_rt:
	ip_rt_put(rt);
out:
	return -ENOENT;
}
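/* Check a single GID table entry under RCU: for SMC-Rv1 any RoCE v1 GID is
 * acceptable; for SMC-Rv2 the entry must be a RoCEv2 (UDP encap) IPv4 GID
 * whose subnet matches the source address and for which a route to the
 * destination can be resolved.
 */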
static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
				    const struct ib_gid_attr *attr,
				    u8 gid[], u8 *sgid_index,
				    struct smc_init_info_smcrv2 *smcrv2)
{
	if (!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) {
		if (gid)
			memcpy(gid, &attr->gid, SMC_GID_SIZE);
		if (sgid_index)
			*sgid_index = attr->index;
		return 0;
	}
	if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
	    smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) {
		struct in_device *in_dev = __in_dev_get_rcu(ndev);
		struct net *net = dev_net(ndev);
		const struct in_ifaddr *ifa;
		bool subnet_match = false;

		if (!in_dev)
			goto out;
		in_dev_for_each_ifa_rcu(ifa, in_dev) {
			if (!inet_ifa_match(smcrv2->saddr, ifa))
				continue;
			subnet_match = true;
			break;
		}
		if (!subnet_match)
			goto out;
		if (smcrv2->daddr && smc_ib_find_route(net, smcrv2->saddr,
						       smcrv2->daddr,
						       smcrv2->nexthop_mac,
						       &smcrv2->uses_gateway))
			goto out;

		if (gid)
			memcpy(gid, &attr->gid, SMC_GID_SIZE);
		if (sgid_index)
			*sgid_index = attr->index;
		return 0;
	}
out:
	return -ENODEV;
}
/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index,
			 struct smc_init_info_smcrv2 *smcrv2)
{
	const struct ib_gid_attr *attr;
	const struct net_device *ndev;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		ndev = rdma_read_gid_attr_ndev_rcu(attr);
		if (!IS_ERR(ndev) &&
		    ((!vlan_id && !is_vlan_dev(ndev)) ||
		     (vlan_id && is_vlan_dev(ndev) &&
		      vlan_dev_vlan_id(ndev) == vlan_id))) {
			if (!smc_ib_determine_gid_rcu(ndev, attr, gid,
						      sgid_index, smcrv2)) {
				rcu_read_unlock();
				rdma_put_gid_attr(attr);
				return 0;
			}
		}
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}
/* check if gid is still defined on smcibdev */
static bool smc_ib_check_link_gid(u8 gid[SMC_GID_SIZE], bool smcrv2,
				  struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	bool rc = false;
	int i;

	for (i = 0; !rc && i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		if ((!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) ||
		    (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
		     !(ipv6_addr_type((const struct in6_addr *)&attr->gid)
		       & IPV6_ADDR_LINKLOCAL)))
			if (!memcmp(gid, &attr->gid, SMC_GID_SIZE))
				rc = true;
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return rc;
}
/* check all links if the gid is still defined on smcibdev */
static void smc_ib_gid_check(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr;
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry(lgr, &smc_lgr_list.list, list) {
		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN))
			continue; /* lgr is not affected */
		if (list_empty(&lgr->list))
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
			    lgr->lnk[i].smcibdev != smcibdev)
				continue;
			if (!smc_ib_check_link_gid(lgr->lnk[i].gid,
						   lgr->smc_version == SMC_V2,
						   smcibdev, ibport))
				smcr_port_err(smcibdev, ibport);
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);
}
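/* Refresh the cached attributes of an IB port: query the port, read its
 * RoCE MAC address, and derive the local system identifier once the first
 * active port is seen.
 */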
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!smc_ib_is_valid_local_systemid() &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
			set_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_err(smcibdev, port_idx + 1);
		} else {
			clear_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_add(smcibdev, port_idx + 1);
			smc_ib_gid_check(smcibdev, port_idx + 1);
		}
	}
}
/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	bool schedule = false;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			if (!test_and_set_bit(port_idx,
					      smcibdev->ports_going_away))
				schedule = true;
		}
		if (schedule)
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}
void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}
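/* Report whether any link group still depends on this IB device, i.e.
 * whether the device carries links of a single or locally asymmetric link
 * group; used for the netlink device dump below.
 */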
static bool smcr_diag_is_dev_critical(struct smc_lgr_list *smc_lgr,
				      struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr;
	bool rc = false;
	int i;

	spin_lock_bh(&smc_lgr->lock);
	list_for_each_entry(lgr, &smc_lgr->list, list) {
		if (lgr->is_smcd)
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
			    lgr->lnk[i].smcibdev != smcibdev)
				continue;
			if (lgr->type == SMC_LGR_SINGLE ||
			    lgr->type == SMC_LGR_ASYMMETRIC_LOCAL) {
				rc = true;
				goto out;
			}
		}
	}
out:
	spin_unlock_bh(&smc_lgr->lock);
	return rc;
}
static int smc_nl_handle_dev_port(struct sk_buff *skb,
				  struct ib_device *ibdev,
				  struct smc_ib_device *smcibdev,
				  int port)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct nlattr *port_attrs;
	unsigned char port_state;
	int lnk_count = 0;

	port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT + port);
	if (!port_attrs)
		goto errout;

	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR,
		       smcibdev->pnetid_by_user[port]))
		goto errattr;
	memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV,
			smcibdev->ndev_ifidx[port]))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_VALID, 1))
		goto errattr;
	port_state = smc_ib_port_active(smcibdev, port + 1);
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_STATE, port_state))
		goto errattr;
	lnk_count = atomic_read(&smcibdev->lnk_cnt_by_port[port]);
	if (nla_put_u32(skb, SMC_NLA_DEV_PORT_LNK_CNT, lnk_count))
		goto errattr;
	nla_nest_end(skb, port_attrs);
	return 0;
errattr:
	nla_nest_cancel(skb, port_attrs);
errout:
	return -EMSGSIZE;
}
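/* Add the PCI attributes of the underlying device (FID, CHID, vendor,
 * device id and PCI id string) to the netlink message.
 */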
static bool smc_nl_handle_pci_values(const struct smc_pci_dev *smc_pci_dev,
				     struct sk_buff *skb)
{
	if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev->pci_fid))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev->pci_pchid))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev->pci_vendor))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev->pci_device))
		return false;
	if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev->pci_id))
		return false;
	return true;
}
static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	char smc_ibname[IB_DEVICE_NAME_MAX];
	struct smc_pci_dev smc_pci_dev;
	struct pci_dev *pci_dev;
	unsigned char is_crit;
	struct nlattr *attrs;
	void *nlh;
	int i;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_DEV_SMCR);
	if (!nlh)
		goto errmsg;
	attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCR);
	if (!attrs)
		goto errout;
	is_crit = smcr_diag_is_dev_critical(&smc_lgr_list, smcibdev);
	if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, is_crit))
		goto errattr;
	if (smcibdev->ibdev->dev.parent) {
		memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
		pci_dev = to_pci_dev(smcibdev->ibdev->dev.parent);
		smc_set_pci_values(pci_dev, &smc_pci_dev);
		if (!smc_nl_handle_pci_values(&smc_pci_dev, skb))
			goto errattr;
	}
	snprintf(smc_ibname, sizeof(smc_ibname), "%s", smcibdev->ibdev->name);
	if (nla_put_string(skb, SMC_NLA_DEV_IB_NAME, smc_ibname))
		goto errattr;
	for (i = 1; i <= SMC_MAX_PORTS; i++) {
		if (!rdma_is_port_valid(smcibdev->ibdev, i))
			continue;
		if (smc_nl_handle_dev_port(skb, smcibdev->ibdev,
					   smcibdev, i - 1))
			goto errattr;
	}

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}
static void smc_nl_prep_smcr_dev(struct smc_ib_devices *dev_list,
				 struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_ib_device *smcibdev;
	int snum = cb_ctx->pos[0];
	int num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcibdev, &dev_list->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_smcr_dev(smcibdev, skb, cb))
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
}

int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_prep_smcr_dev(&smc_ib_devices, skb, cb);
	return skb->len;
}
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}
void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}
/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = sges_per_buf,
			.max_inline_data = 0,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}
void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}
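/* Map the already DMA-mapped SG list of buf_slot into its memory region;
 * the caller verifies that all orig_nents entries were mapped.
 */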
static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr[link_idx],
			      buf_slot->sgt[link_idx].sgl,
			      buf_slot->sgt[link_idx].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}
/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot, u8 link_idx)
{
	if (buf_slot->mr[link_idx])
		return 0; /* already done */

	buf_slot->mr[link_idx] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr[link_idx])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr[link_idx]);
		buf_slot->mr[link_idx] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot, link_idx) !=
	    buf_slot->sgt[link_idx].orig_nents)
		return -EINVAL;

	return 0;
}
bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot)
{
	struct scatterlist *sg;
	unsigned int i;
	bool ret = false;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		if (dma_need_sync(lnk->smcibdev->ibdev->dma_device,
				  sg_dma_address(sg))) {
			ret = true;
			goto out;
		}
	}

out:
	return ret;
}
/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
		return;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}
/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_link *lnk,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
		return;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}
/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
				     buf_slot->sgt[lnk->link_idx].sgl,
				     buf_slot->sgt[lnk->link_idx].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}
void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);
	buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}
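/* One-time per-device initialization: create the send and receive
 * completion queues (sized so that the CQE array fits mlx5 CQ allocation)
 * and register the device with the work-request layer. Serialized by
 * smcibdev->mutex.
 */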
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr =	{
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	mutex_lock(&smcibdev->mutex);
	rc = 0;
	if (smcibdev->initialized)
		goto out;
	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_PAGE_ORDER - cqe_size_order;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		goto out;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	goto out;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
out:
	mutex_unlock(&smcibdev->mutex);
	return rc;
}
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	mutex_lock(&smcibdev->mutex);
	if (!smcibdev->initialized)
		goto out;
	smcibdev->initialized = 0;
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
	smc_wr_remove_dev(smcibdev);
out:
	mutex_unlock(&smcibdev->mutex);
}
static struct ib_client smc_ib_client;

static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port)
{
	struct ib_device *ibdev = smcibdev->ibdev;
	struct net_device *ndev;

	if (!ibdev->ops.get_netdev)
		return;
	ndev = ibdev->ops.get_netdev(ibdev, port + 1);
	if (ndev) {
		smcibdev->ndev_ifidx[port] = ndev->ifindex;
		dev_put(ndev);
	}
}
void smc_ib_ndev_change(struct net_device *ndev, unsigned long event)
{
	struct smc_ib_device *smcibdev;
	struct ib_device *libdev;
	struct net_device *lndev;
	u8 port_cnt;
	int i;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		port_cnt = smcibdev->ibdev->phys_port_cnt;
		for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) {
			libdev = smcibdev->ibdev;
			if (!libdev->ops.get_netdev)
				continue;
			lndev = libdev->ops.get_netdev(libdev, i + 1);
			dev_put(lndev);
			if (lndev != ndev)
				continue;
			if (event == NETDEV_REGISTER)
				smcibdev->ndev_ifidx[i] = ndev->ifindex;
			if (event == NETDEV_UNREGISTER)
				smcibdev->ndev_ifidx[i] = 0;
		}
	}
	mutex_unlock(&smc_ib_devices.mutex);
}
/* callback function for ib_register_client() */
static int smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return -EOPNOTSUPP;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return -ENOMEM;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
	atomic_set(&smcibdev->lnk_cnt, 0);
	init_waitqueue_head(&smcibdev->lnks_deleted);
	mutex_init(&smcibdev->mutex);
	mutex_lock(&smc_ib_devices.mutex);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	mutex_unlock(&smc_ib_devices.mutex);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	pr_warn_ratelimited("smc: adding ib device %s with port count %d\n",
			    smcibdev->ibdev->name, port_cnt);
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		if (smc_pnetid_by_dev_port(ibdev->dev.parent, i,
					   smcibdev->pnetid[i]))
			smc_pnetid_by_table_ib(smcibdev, i + 1);
		smc_copy_netdev_ifindex(smcibdev, i);
		pr_warn_ratelimited("smc:    ib device %s port %d has pnetid "
				    "%.16s%s\n",
				    smcibdev->ibdev->name, i + 1,
				    smcibdev->pnetid[i],
				    smcibdev->pnetid_by_user[i] ?
				     " (user defined)" :
				     "");
	}
	schedule_work(&smcibdev->port_event_work);
	return 0;
}
/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev = client_data;

	mutex_lock(&smc_ib_devices.mutex);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	mutex_unlock(&smc_ib_devices.mutex);
	pr_warn_ratelimited("smc: removing ib device %s\n",
			    smcibdev->ibdev->name);
	smc_smcr_terminate_all(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	cancel_work_sync(&smcibdev->port_event_work);
	kfree(smcibdev);
}
static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove = smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	smc_ib_init_local_systemid();
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}