/*
 * QEMU paravirtual RDMA - Resource Manager Implementation
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"

#include "rdma_utils.h"
#include "rdma_backend.h"
#include "rdma_rm.h"
/* Page directory and page tables */
#define PG_DIR_SZ (TARGET_PAGE_SIZE / sizeof(__u64))
#define PG_TBL_SZ (TARGET_PAGE_SIZE / sizeof(__u64))
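
/*
 * Generic resource table: a flat array of fixed-size entries plus an
 * allocation bitmap, protected by a mutex. A resource handle is simply
 * the entry's index into the array.
 */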
static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl,
                                uint32_t tbl_sz, uint32_t res_sz)
{
    tbl->tbl = g_malloc(tbl_sz * res_sz);

    strncpy(tbl->name, name, MAX_RM_TBL_NAME);
    tbl->name[MAX_RM_TBL_NAME - 1] = 0;

    tbl->bitmap = bitmap_new(tbl_sz);
    tbl->tbl_sz = tbl_sz;
    tbl->res_sz = res_sz;
    qemu_mutex_init(&tbl->lock);
}
static inline void res_tbl_free(RdmaRmResTbl *tbl)
{
    qemu_mutex_destroy(&tbl->lock);
    g_free(tbl->tbl);
    bitmap_zero_extend(tbl->bitmap, tbl->tbl_sz, 0);
}
static inline void *res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle)
{
    pr_dbg("%s, handle=%d\n", tbl->name, handle);

    if ((handle < tbl->tbl_sz) && (test_bit(handle, tbl->bitmap))) {
        return tbl->tbl + handle * tbl->res_sz;
    }

    pr_dbg("Invalid handle %d\n", handle);

    return NULL;
}
static inline void *res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle)
{
    qemu_mutex_lock(&tbl->lock);

    *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz);
    if (*handle >= tbl->tbl_sz) {
        pr_dbg("Failed to alloc, bitmap is full\n");
        qemu_mutex_unlock(&tbl->lock);
        return NULL;
    }

    set_bit(*handle, tbl->bitmap);

    qemu_mutex_unlock(&tbl->lock);

    memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz);

    pr_dbg("%s, handle=%d\n", tbl->name, *handle);

    return tbl->tbl + *handle * tbl->res_sz;
}
static inline void res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
{
    pr_dbg("%s, handle=%d\n", tbl->name, handle);

    qemu_mutex_lock(&tbl->lock);

    if (handle < tbl->tbl_sz) {
        clear_bit(handle, tbl->bitmap);
    }

    qemu_mutex_unlock(&tbl->lock);
}
int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t *pd_handle, uint32_t ctx_handle)
{
    int ret = -ENOMEM;
    RdmaRmPD *pd;

    pd = res_tbl_alloc(&dev_res->pd_tbl, pd_handle);
    if (!pd) {
        goto out;
    }

    ret = rdma_backend_create_pd(backend_dev, &pd->backend_pd);
    if (ret) {
        ret = -EIO;
        goto out_tbl_dealloc;
    }

    pd->ctx_handle = ctx_handle;

    return 0;

out_tbl_dealloc:
    res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle);

out:
    return ret;
}
RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
{
    return res_tbl_get(&dev_res->pd_tbl, pd_handle);
}
void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
{
    RdmaRmPD *pd = rdma_rm_get_pd(dev_res, pd_handle);

    if (pd) {
        rdma_backend_destroy_pd(&pd->backend_pd);
        res_tbl_dealloc(&dev_res->pd_tbl, pd_handle);
    }
}
int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint64_t guest_start, size_t guest_length, void *host_virt,
                     int access_flags, uint32_t *mr_handle, uint32_t *lkey,
                     uint32_t *rkey)
{
    RdmaRmMR *mr;
    int ret = 0;
    RdmaRmPD *pd;

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        pr_dbg("Invalid PD\n");
        return -EINVAL;
    }

    mr = res_tbl_alloc(&dev_res->mr_tbl, mr_handle);
    if (!mr) {
        pr_dbg("Failed to allocate obj in table\n");
        return -ENOMEM;
    }
    pr_dbg("mr_handle=%d\n", *mr_handle);

    pr_dbg("host_virt=0x%p\n", host_virt);
    pr_dbg("guest_start=0x%" PRIx64 "\n", guest_start);
    pr_dbg("length=%zu\n", guest_length);

    if (host_virt) {
        mr->virt = host_virt;
        mr->start = guest_start;
        mr->length = guest_length;
        mr->virt += (mr->start & (TARGET_PAGE_SIZE - 1));

        ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, mr->virt,
                                     mr->length, access_flags);
        if (ret) {
            pr_dbg("Fail in rdma_backend_create_mr, err=%d\n", ret);
            ret = -EIO;
            goto out_dealloc_mr;
        }
    }

    /* We keep mr_handle in lkey so send and recv can get the mr ptr */
    *lkey = *mr_handle;
    *rkey = -1;

    mr->pd_handle = pd_handle;

    return 0;

out_dealloc_mr:
    res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle);

    return ret;
}
RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    return res_tbl_get(&dev_res->mr_tbl, mr_handle);
}
void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    RdmaRmMR *mr = rdma_rm_get_mr(dev_res, mr_handle);

    if (mr) {
        rdma_backend_destroy_mr(&mr->backend_mr);
        pr_dbg("start=0x%" PRIx64 "\n", mr->start);
        if (mr->start) {
            mr->virt -= (mr->start & (TARGET_PAGE_SIZE - 1));
            munmap(mr->virt, mr->length);
        }
        res_tbl_dealloc(&dev_res->mr_tbl, mr_handle);
    }
}
int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn,
                     uint32_t *uc_handle)
{
    RdmaRmUC *uc;

    /* TODO: Need to make sure pfn is between bar start address and
     * bar start address + RDMA_BAR2_UAR_SIZE
    if (pfn > RDMA_BAR2_UAR_SIZE) {
        pr_err("pfn out of range (%d > %d)\n", pfn, RDMA_BAR2_UAR_SIZE);
        return -ENOMEM;
    }
    */

    uc = res_tbl_alloc(&dev_res->uc_tbl, uc_handle);
    if (!uc) {
        return -ENOMEM;
    }

    return 0;
}
RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
{
    return res_tbl_get(&dev_res->uc_tbl, uc_handle);
}
void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
{
    RdmaRmUC *uc = rdma_rm_get_uc(dev_res, uc_handle);

    if (uc) {
        res_tbl_dealloc(&dev_res->uc_tbl, uc_handle);
    }
}
RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
{
    return res_tbl_get(&dev_res->cq_tbl, cq_handle);
}
int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t cqe, uint32_t *cq_handle, void *opaque)
{
    int rc;
    RdmaRmCQ *cq;

    cq = res_tbl_alloc(&dev_res->cq_tbl, cq_handle);
    if (!cq) {
        return -ENOMEM;
    }

    cq->opaque = opaque;
    cq->notify = false;

    rc = rdma_backend_create_cq(backend_dev, &cq->backend_cq, cqe);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_cq;
    }

    return 0;

out_dealloc_cq:
    rdma_rm_dealloc_cq(dev_res, *cq_handle);

    return rc;
}
void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle,
                           bool notify)
{
    RdmaRmCQ *cq;

    pr_dbg("cq_handle=%d, notify=0x%x\n", cq_handle, notify);

    cq = rdma_rm_get_cq(dev_res, cq_handle);
    if (!cq) {
        return;
    }

    cq->notify = notify;
    pr_dbg("notify=%d\n", cq->notify);
}
void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
{
    RdmaRmCQ *cq;

    cq = rdma_rm_get_cq(dev_res, cq_handle);
    if (!cq) {
        return;
    }

    rdma_backend_destroy_cq(&cq->backend_cq);

    res_tbl_dealloc(&dev_res->cq_tbl, cq_handle);
}
RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn)
{
    GBytes *key = g_bytes_new(&qpn, sizeof(qpn));

    RdmaRmQP *qp = g_hash_table_lookup(dev_res->qp_hash, key);

    g_bytes_unref(key);

    return qp;
}
int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint8_t qp_type, uint32_t max_send_wr,
                     uint32_t max_send_sge, uint32_t send_cq_handle,
                     uint32_t max_recv_wr, uint32_t max_recv_sge,
                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn)
{
    int rc;
    RdmaRmQP *qp;
    RdmaRmCQ *scq, *rcq;
    RdmaRmPD *pd;
    uint32_t rm_qpn;

    pr_dbg("qp_type=%d\n", qp_type);

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        pr_err("Invalid pd handle (%d)\n", pd_handle);
        return -EINVAL;
    }

    scq = rdma_rm_get_cq(dev_res, send_cq_handle);
    rcq = rdma_rm_get_cq(dev_res, recv_cq_handle);

    if (!scq || !rcq) {
        pr_err("Invalid send_cqn or recv_cqn (%d, %d)\n",
               send_cq_handle, recv_cq_handle);
        return -EINVAL;
    }

    qp = res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn);
    if (!qp) {
        return -ENOMEM;
    }
    pr_dbg("rm_qpn=%d\n", rm_qpn);

    qp->qpn = rm_qpn;
    qp->qp_state = IBV_QPS_RESET;
    qp->qp_type = qp_type;
    qp->send_cq_handle = send_cq_handle;
    qp->recv_cq_handle = recv_cq_handle;
    qp->opaque = opaque;

    rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd,
                                &scq->backend_cq, &rcq->backend_cq, max_send_wr,
                                max_recv_wr, max_send_sge, max_recv_sge);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_qp;
    }

    *qpn = rdma_backend_qpn(&qp->backend_qp);
    pr_dbg("rm_qpn=%d, backend_qpn=0x%x\n", rm_qpn, *qpn);
    g_hash_table_insert(dev_res->qp_hash, g_bytes_new(qpn, sizeof(*qpn)), qp);

    return 0;

out_dealloc_qp:
    res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);

    return rc;
}
int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                      uint32_t qp_handle, uint32_t attr_mask,
                      union ibv_gid *dgid, uint32_t dqpn,
                      enum ibv_qp_state qp_state, uint32_t qkey,
                      uint32_t rq_psn, uint32_t sq_psn)
{
    RdmaRmQP *qp;
    int ret;

    pr_dbg("qpn=0x%x\n", qp_handle);

    qp = rdma_rm_get_qp(dev_res, qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    pr_dbg("qp_type=%d\n", qp->qp_type);
    pr_dbg("attr_mask=0x%x\n", attr_mask);

    if (qp->qp_type == IBV_QPT_SMI) {
        pr_dbg("QP0 unsupported\n");
        return -EPERM;
    } else if (qp->qp_type == IBV_QPT_GSI) {
        pr_dbg("QP1\n");
        return 0;
    }

    if (attr_mask & IBV_QP_STATE) {
        qp->qp_state = qp_state;
        pr_dbg("qp_state=%d\n", qp->qp_state);

        if (qp->qp_state == IBV_QPS_INIT) {
            ret = rdma_backend_qp_state_init(backend_dev, &qp->backend_qp,
                                             qp->qp_type, qkey);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTR) {
            ret = rdma_backend_qp_state_rtr(backend_dev, &qp->backend_qp,
                                            qp->qp_type, dgid, dqpn, rq_psn,
                                            qkey, attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTS) {
            ret = rdma_backend_qp_state_rts(&qp->backend_qp, qp->qp_type,
                                            sq_psn, qkey,
                                            attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }
    }

    return 0;
}
int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t qp_handle, struct ibv_qp_attr *attr,
                     int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    RdmaRmQP *qp;

    pr_dbg("qpn=0x%x\n", qp_handle);

    qp = rdma_rm_get_qp(dev_res, qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    pr_dbg("qp_type=%d\n", qp->qp_type);

    return rdma_backend_query_qp(&qp->backend_qp, attr, attr_mask, init_attr);
}
void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle)
{
    RdmaRmQP *qp;
    GBytes *key;

    key = g_bytes_new(&qp_handle, sizeof(qp_handle));
    qp = g_hash_table_lookup(dev_res->qp_hash, key);
    g_hash_table_remove(dev_res->qp_hash, key);
    g_bytes_unref(key);

    if (!qp) {
        return;
    }

    rdma_backend_destroy_qp(&qp->backend_qp);

    res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);
}
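
/*
 * CQE contexts are opaque per-work-request pointers parked in cqe_ctx_tbl
 * until the matching completion arrives; the table is sized for the worst
 * case of max_qp * max_qp_wr outstanding requests (see rdma_rm_init below).
 */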
void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
{
    void **cqe_ctx;

    cqe_ctx = res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
    if (!cqe_ctx) {
        return NULL;
    }

    pr_dbg("ctx=%p\n", *cqe_ctx);

    return *cqe_ctx;
}
int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id,
                          void *ctx)
{
    void **cqe_ctx;

    cqe_ctx = res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
    if (!cqe_ctx) {
        return -ENOMEM;
    }

    pr_dbg("ctx=%p\n", ctx);
    *cqe_ctx = ctx;

    return 0;
}
void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
{
    res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
}
static void destroy_qp_hash_key(gpointer data)
{
    g_bytes_unref(data);
}
int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr,
                 Error **errp)
{
    dev_res->qp_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                             destroy_qp_hash_key, NULL);
    if (!dev_res->qp_hash) {
        return -ENOMEM;
    }

    res_tbl_init("PD", &dev_res->pd_tbl, dev_attr->max_pd, sizeof(RdmaRmPD));
    res_tbl_init("CQ", &dev_res->cq_tbl, dev_attr->max_cq, sizeof(RdmaRmCQ));
    res_tbl_init("MR", &dev_res->mr_tbl, dev_attr->max_mr, sizeof(RdmaRmMR));
    res_tbl_init("QP", &dev_res->qp_tbl, dev_attr->max_qp, sizeof(RdmaRmQP));
    res_tbl_init("CQE_CTX", &dev_res->cqe_ctx_tbl, dev_attr->max_qp *
                 dev_attr->max_qp_wr, sizeof(void *));
    res_tbl_init("UC", &dev_res->uc_tbl, MAX_UCS, sizeof(RdmaRmUC));

    return 0;
}
void rdma_rm_fini(RdmaDeviceResources *dev_res)
{
    res_tbl_free(&dev_res->uc_tbl);
    res_tbl_free(&dev_res->cqe_ctx_tbl);
    res_tbl_free(&dev_res->qp_tbl);
    res_tbl_free(&dev_res->mr_tbl);
    res_tbl_free(&dev_res->cq_tbl);
    res_tbl_free(&dev_res->pd_tbl);

    g_hash_table_destroy(dev_res->qp_hash);
}