// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Establish SMC-R as an InfiniBand Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */
struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};
#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;	/* unique system
								 * identifier
								 */
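
/* move a link's QP from RESET to INIT: bind it to its IB port, use pkey
 * index 0, and allow local and remote RDMA write access
 */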
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}
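
/* move the QP to RTR (ready to receive): program the RoCE address vector
 * with the peer GID and MAC, the negotiated path MTU, the peer QP number
 * and the peer's starting receive packet sequence number
 */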
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}
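
/* move the QP to RTS (ready to send): set ack timeout, retry counters and
 * the initial send packet sequence number
 */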
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}
int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}
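
/* bring the QP of a new link to operational state: INIT, then RTR, and
 * for the server side also RTS; arm the receive CQ and post the initial
 * receive work requests along the way
 */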
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}
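
/* cache the MAC address of the net device behind GID index 0 of a port */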
static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);
	return rc;
}
/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}
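
/* check whether a port of an IB device is in active state */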
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index)
{
	const struct ib_gid_attr *attr;
	const struct net_device *ndev;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		ndev = rdma_read_gid_attr_ndev_rcu(attr);
		if (!IS_ERR(ndev) &&
		    ((!vlan_id && !is_vlan_dev(attr->ndev)) ||
		     (vlan_id && is_vlan_dev(attr->ndev) &&
		      vlan_dev_vlan_id(attr->ndev) == vlan_id)) &&
		    attr->gid_type == IB_GID_TYPE_ROCE) {
			rcu_read_unlock();
			if (gid)
				memcpy(gid, &attr->gid, SMC_GID_SIZE);
			if (sgid_index)
				*sgid_index = attr->index;
			rdma_put_gid_attr(attr);
			return 0;
		}
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}
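
/* query the attributes of an IB port, cache its MAC address, and - while
 * the local system identifier is still unset - derive the systemid from
 * the first active port
 */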
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1))
			smc_port_terminate(smcibdev, port_idx + 1);
	}
}
/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++)
			set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx < SMC_MAX_PORTS) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			schedule_work(&smcibdev->port_event_work);
		}
		break;
	default:
		break;
	}
}
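
/* free the protection domain of a link, if one was allocated */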
void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}
int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}
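
/* QP event callback: on fatal QP errors, flag the affected port so the
 * port event worker re-evaluates it
 */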
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx < SMC_MAX_PORTS) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			schedule_work(&smcibdev->port_event_work);
		}
		break;
	default:
		break;
	}
}
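
/* destroy the QP of a link, if one was created */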
void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}
/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}
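
/* deregister a memory region obtained via smc_ib_get_memory_region() */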
void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}
static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			      &offset, PAGE_SIZE);
	return sg_num;
}
/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot)
{
	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
		return 0; /* already done */

	buf_slot->mr_rx[SMC_SINGLE_LINK] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot) != 1)
		return -EINVAL;

	return 0;
}
/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}
/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}
/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}
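
/* unmap a buffer SG-table from DMA, if it is still mapped */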
void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(smcibdev->ibdev,
			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			data_direction);
	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}
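
/* create the send and receive completion queues for an IB device and
 * register the device with the work request layer; the CQ size is capped
 * so the resulting CQE allocation still fits (e.g. for mlx5)
 */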
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr = {
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}
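
/* counterpart of smc_ib_setup_per_ibdev(): unregister from the work
 * request layer and destroy both completion queues
 */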
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smcibdev->initialized = 0;
	smc_wr_remove_dev(smcibdev);
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
}
static struct ib_client smc_ib_client;
/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		smc_pnetid_by_dev_port(ibdev->dev.parent, i,
				       smcibdev->pnetid[i]);
	}
	schedule_work(&smcibdev->port_event_work);
}
/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	kfree(smcibdev);
}
static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};
int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}
void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}