/*
 * QEMU paravirtual RDMA - Command channel
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_ids.h"

#include "../rdma_backend.h"
#include "../rdma_rm.h"
#include "../rdma_utils.h"

#include "pvrdma.h"
#include "standard-headers/rdma/vmw_pvrdma-abi.h"
static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
                                uint32_t nchunks, size_t length)
{
    uint64_t *dir, *tbl;
    int tbl_idx, dir_idx, addr_idx;
    void *host_virt = NULL, *curr_page;

    if (!nchunks) {
        pr_dbg("nchunks=0\n");
        return NULL;
    }

    dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        error_report("PVRDMA: Failed to map to page directory");
        return NULL;
    }

    tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        error_report("PVRDMA: Failed to map to page table 0");
        goto out_unmap_dir;
    }

    curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
    if (!curr_page) {
        error_report("PVRDMA: Failed to map the first page");
        goto out_unmap_tbl;
    }

    host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
    pr_dbg("mremap %p -> %p\n", curr_page, host_virt);
    if (host_virt == MAP_FAILED) {
        host_virt = NULL;
        error_report("PVRDMA: Failed to remap memory for host_virt");
        goto out_unmap_tbl;
    }

    rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

    pr_dbg("host_virt=%p\n", host_virt);

    /* Page 0 is already mapped above, continue from chunk 1 */
    dir_idx = 0;
    tbl_idx = 1;
    addr_idx = 1;
    while (addr_idx < nchunks) {
        if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
            tbl_idx = 0;
            dir_idx++;
            pr_dbg("Mapping to table %d\n", dir_idx);
            rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
            tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
            if (!tbl) {
                error_report("PVRDMA: Failed to map to page table %d", dir_idx);
                goto out_unmap_host_virt;
            }
        }

        pr_dbg("guest_dma[%d]=0x%" PRIx64 "\n", addr_idx, tbl[tbl_idx]);

        curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
                                     TARGET_PAGE_SIZE);
        if (!curr_page) {
            error_report("PVRDMA: Failed to map to page %d, dir %d", tbl_idx,
                         dir_idx);
            goto out_unmap_host_virt;
        }

        mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
               host_virt + TARGET_PAGE_SIZE * addr_idx);

        rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

        addr_idx++;
        tbl_idx++;
    }

    goto out_unmap_tbl;

out_unmap_host_virt:
    munmap(host_virt, length);
    host_virt = NULL;

out_unmap_tbl:
    rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);

out_unmap_dir:
    rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);

    return host_virt;
}
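
/*
 * Most command handlers below fill the response header (echoing the
 * request's 'response' cookie and setting the matching *_RESP ack code)
 * and return the error that execute_command() reports back to the guest.
 */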
static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_port *cmd = &req->query_port;
    struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
    struct pvrdma_port_attr attrs = {0};

    pr_dbg("port=%d\n", cmd->port_num);

    if (rdma_backend_query_port(&dev->backend_dev,
                                (struct ibv_port_attr *)&attrs)) {
        return -ENOMEM;
    }

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_QUERY_PORT_RESP;

    resp->attrs.state = attrs.state;
    resp->attrs.max_mtu = attrs.max_mtu;
    resp->attrs.active_mtu = attrs.active_mtu;
    resp->attrs.phys_state = attrs.phys_state;
    resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
    resp->attrs.max_msg_sz = 1024;
    resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
    resp->attrs.active_width = 1;
    resp->attrs.active_speed = 1;

    return 0;
}
static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
    struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;

    pr_dbg("port=%d\n", cmd->port_num);
    pr_dbg("index=%d\n", cmd->index);

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_QUERY_PKEY_RESP;

    resp->pkey = PVRDMA_PKEY;
    pr_dbg("pkey=0x%x\n", resp->pkey);

    return 0;
}
static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
    struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;

    pr_dbg("context=0x%x\n", cmd->ctx_handle ? cmd->ctx_handle : 0);

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_PD_RESP;
    resp->hdr.err = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
                                     &resp->pd_handle, cmd->ctx_handle);

    pr_dbg("ret=%d\n", resp->hdr.err);
    return resp->hdr.err;
}
static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;

    pr_dbg("pd_handle=%d\n", cmd->pd_handle);

    rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);

    return 0;
}
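
/*
 * For a non-DMA memory region the guest supplies a page directory of its
 * backing pages; pvrdma_map_to_pdir() turns that into one host virtual
 * mapping, which the resource manager then registers with the backend.
 */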
static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
    struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    void *host_virt = NULL;

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_MR_RESP;

    pr_dbg("pd_handle=%d\n", cmd->pd_handle);
    pr_dbg("access_flags=0x%x\n", cmd->access_flags);
    pr_dbg("flags=0x%x\n", cmd->flags);

    if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
        host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
                                       cmd->length);
        if (!host_virt) {
            pr_dbg("Failed to map to pdir\n");
            resp->hdr.err = -EINVAL;
            goto out;
        }
    }

    resp->hdr.err = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle,
                                     cmd->start, cmd->length, host_virt,
                                     cmd->access_flags, &resp->mr_handle,
                                     &resp->lkey, &resp->rkey);
    if (host_virt && !resp->hdr.err) {
        munmap(host_virt, cmd->length);
    }

out:
    pr_dbg("ret=%d\n", resp->hdr.err);
    return resp->hdr.err;
}
static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;

    pr_dbg("mr_handle=%d\n", cmd->mr_handle);

    rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);

    return 0;
}
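
/*
 * Build the completion-queue ring from the guest page directory: page 0 of
 * the first table holds the pvrdma_ring state, the remaining chunks hold
 * the CQE entries handed to pvrdma_ring_init().
 */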
static int create_cq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];

    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        pr_dbg("Failed to map to CQ page directory\n");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        pr_dbg("Failed to map to CQ page table\n");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

    if (!r->ring_state) {
        pr_dbg("Failed to map to CQ ring state\n");
        goto out_free_ring;
    }

    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
                          cqe, sizeof(struct pvrdma_cqe),
                          /* first page is ring state */
                          (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}
static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
    struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
    PvrdmaRing *ring = NULL;

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_CQ_RESP;

    resp->cqe = cmd->cqe;

    resp->hdr.err = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
                                   cmd->nchunks, cmd->cqe);
    if (resp->hdr.err) {
        goto out;
    }

    pr_dbg("ring=%p\n", ring);

    resp->hdr.err = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev,
                                     cmd->cqe, &resp->cq_handle, ring);
    resp->cqe = cmd->cqe;

out:
    pr_dbg("ret=%d\n", resp->hdr.err);
    return resp->hdr.err;
}
static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
    RdmaRmCQ *cq;
    PvrdmaRing *ring;

    pr_dbg("cq_handle=%d\n", cmd->cq_handle);

    cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
    if (!cq) {
        pr_dbg("Invalid CQ handle\n");
        return -EINVAL;
    }

    ring = (PvrdmaRing *)cq->opaque;
    pvrdma_ring_free(ring);
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(PCI_DEVICE(dev), --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);

    rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);

    return 0;
}
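
/*
 * A QP uses two rings carved out of one allocation: sr (send) and rr (recv).
 * Page 0 of the first table provides both ring-state slots (send in slot 0,
 * recv in slot 1); the send ring takes the next 'spages' chunks and the recv
 * ring the chunks after that.
 */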
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
                           uint32_t rpages)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *sr, *rr;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        pr_dbg("Failed to map to CQ page directory\n");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        pr_dbg("Failed to map to CQ page table\n");
        goto out;
    }

    sr = g_malloc(2 * sizeof(*rr));
    rr = &sr[1];
    pr_dbg("sring=%p\n", sr);
    pr_dbg("rring=%p\n", rr);

    *rings = sr;

    pr_dbg("scqe=%d\n", scqe);
    pr_dbg("smax_sge=%d\n", smax_sge);
    pr_dbg("spages=%d\n", spages);
    pr_dbg("rcqe=%d\n", rcqe);
    pr_dbg("rmax_sge=%d\n", rmax_sge);
    pr_dbg("rpages=%d\n", rpages);

    /* Create send ring */
    sr->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!sr->ring_state) {
        pr_dbg("Failed to map to CQ ring state\n");
        goto out_free_sr_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * smax_sge - 1);

    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
    if (rc) {
        goto out_unmap_ring_state;
    }

    /* Create recv ring */
    rr->ring_state = &sr->ring_state[1];
    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * rmax_sge - 1);
    sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
                          rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], rpages);
    if (rc) {
        goto out_free_sr;
    }

    goto out;

out_free_sr:
    pvrdma_ring_free(sr);

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);

out_free_sr_mem:
    g_free(sr);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}
static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
    struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
    PvrdmaRing *rings = NULL;

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_QP_RESP;

    pr_dbg("total_chunks=%d\n", cmd->total_chunks);
    pr_dbg("send_chunks=%d\n", cmd->send_chunks);

    resp->hdr.err = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
                                    cmd->max_send_wr, cmd->max_send_sge,
                                    cmd->send_chunks, cmd->max_recv_wr,
                                    cmd->max_recv_sge, cmd->total_chunks -
                                    cmd->send_chunks - 1);
    if (resp->hdr.err) {
        goto out;
    }

    pr_dbg("rings=%p\n", rings);

    resp->hdr.err = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle,
                                     cmd->qp_type, cmd->max_send_wr,
                                     cmd->max_send_sge, cmd->send_cq_handle,
                                     cmd->max_recv_wr, cmd->max_recv_sge,
                                     cmd->recv_cq_handle, rings, &resp->qpn);

    resp->max_send_wr = cmd->max_send_wr;
    resp->max_recv_wr = cmd->max_recv_wr;
    resp->max_send_sge = cmd->max_send_sge;
    resp->max_recv_sge = cmd->max_recv_sge;
    resp->max_inline_data = cmd->max_inline_data;

out:
    pr_dbg("ret=%d\n", resp->hdr.err);
    return resp->hdr.err;
}
static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;

    pr_dbg("qp_handle=%d\n", cmd->qp_handle);

    memset(rsp, 0, sizeof(*rsp));
    rsp->hdr.response = cmd->hdr.response;
    rsp->hdr.ack = PVRDMA_CMD_MODIFY_QP_RESP;

    rsp->hdr.err = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
                                     cmd->qp_handle, cmd->attr_mask,
                                     (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
                                     cmd->attrs.dest_qp_num,
                                     (enum ibv_qp_state)cmd->attrs.qp_state,
                                     cmd->attrs.qkey, cmd->attrs.rq_psn,
                                     cmd->attrs.sq_psn);

    pr_dbg("ret=%d\n", rsp->hdr.err);
    return rsp->hdr.err;
}
static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                    union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
    struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
    struct ibv_qp_init_attr init_attr;

    pr_dbg("qp_handle=%d\n", cmd->qp_handle);
    pr_dbg("attr_mask=0x%x\n", cmd->attr_mask);

    memset(rsp, 0, sizeof(*rsp));
    rsp->hdr.response = cmd->hdr.response;
    rsp->hdr.ack = PVRDMA_CMD_QUERY_QP_RESP;

    rsp->hdr.err = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev,
                                    cmd->qp_handle,
                                    (struct ibv_qp_attr *)&resp->attrs,
                                    cmd->attr_mask, &init_attr);

    pr_dbg("ret=%d\n", rsp->hdr.err);
    return rsp->hdr.err;
}
static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
    RdmaRmQP *qp;
    PvrdmaRing *ring;

    qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
    if (!qp) {
        pr_dbg("Invalid QP handle\n");
        return -EINVAL;
    }

    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);

    ring = (PvrdmaRing *)qp->opaque;
    pr_dbg("sring=%p\n", &ring[0]);
    pvrdma_ring_free(&ring[0]);
    pr_dbg("rring=%p\n", &ring[1]);
    pvrdma_ring_free(&ring[1]);

    rdma_pci_dma_unmap(PCI_DEVICE(dev), ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);

    return 0;
}
static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
    __be64 *subnet = (__be64 *)&cmd->new_gid[0];
    __be64 *if_id = (__be64 *)&cmd->new_gid[8];

    pr_dbg("index=%d\n", cmd->index);

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    pr_dbg("gid[%d]=0x%llx,0x%llx\n", cmd->index,
           (long long unsigned int)be64_to_cpu(*subnet),
           (long long unsigned int)be64_to_cpu(*if_id));

    /* Driver forces to one port only */
    memcpy(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw, &cmd->new_gid,
           sizeof(cmd->new_gid));

    /* TODO: Since the driver stores node_guid at the load_dsr phase, this
     * assignment is not relevant; we still need a way to retrieve the MAC
     * of our netdev */
    dev->node_guid = dev->rdma_dev_res.ports[0].gid_tbl[0].global.interface_id;
    pr_dbg("dev->node_guid=0x%llx\n",
           (long long unsigned int)be64_to_cpu(dev->node_guid));

    return 0;
}
static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                        union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;

    pr_dbg("index=%d\n", cmd->index);

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    memset(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw, 0,
           sizeof(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw));

    return 0;
}
static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
    struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;

    pr_dbg("pfn=%d\n", cmd->pfn);

    memset(resp, 0, sizeof(*resp));
    resp->hdr.response = cmd->hdr.response;
    resp->hdr.ack = PVRDMA_CMD_CREATE_UC_RESP;
    resp->hdr.err = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn,
                                     &resp->ctx_handle);

    pr_dbg("ret=%d\n", resp->hdr.err);

    return 0;
}
static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;

    pr_dbg("ctx_handle=%d\n", cmd->ctx_handle);

    rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);

    return 0;
}
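
/*
 * Dispatch table: indexed by the PVRDMA_CMD_* value taken from the request
 * header, so the order of the entries must match the command numbering.
 */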
struct cmd_handler {
    uint32_t cmd;
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
                union pvrdma_cmd_resp *rsp);
};
static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT, query_port},
    {PVRDMA_CMD_QUERY_PKEY, query_pkey},
    {PVRDMA_CMD_CREATE_PD, create_pd},
    {PVRDMA_CMD_DESTROY_PD, destroy_pd},
    {PVRDMA_CMD_CREATE_MR, create_mr},
    {PVRDMA_CMD_DESTROY_MR, destroy_mr},
    {PVRDMA_CMD_CREATE_CQ, create_cq},
    {PVRDMA_CMD_RESIZE_CQ, NULL},
    {PVRDMA_CMD_DESTROY_CQ, destroy_cq},
    {PVRDMA_CMD_CREATE_QP, create_qp},
    {PVRDMA_CMD_MODIFY_QP, modify_qp},
    {PVRDMA_CMD_QUERY_QP, query_qp},
    {PVRDMA_CMD_DESTROY_QP, destroy_qp},
    {PVRDMA_CMD_CREATE_UC, create_uc},
    {PVRDMA_CMD_DESTROY_UC, destroy_uc},
    {PVRDMA_CMD_CREATE_BIND, create_bind},
    {PVRDMA_CMD_DESTROY_BIND, destroy_bind},
};
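
/*
 * Entry point for the command channel: look up the handler for the request
 * currently in the device state region, run it, publish the result in
 * PVRDMA_REG_ERR and raise the command-ring interrupt vector.
 */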
int execute_command(PVRDMADev *dev)
{
    int err = 0xFFFF;
    DSRInfo *dsr_info;

    dsr_info = &dev->dsr_info;

    pr_dbg("cmd=%d\n", dsr_info->req->hdr.cmd);
    if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
                      sizeof(struct cmd_handler)) {
        pr_dbg("Unsupported command\n");
        goto out;
    }

    if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
        pr_dbg("Unsupported command (not implemented yet)\n");
        goto out;
    }

    err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
                                                    dsr_info->rsp);

out:
    set_reg_val(dev, PVRDMA_REG_ERR, err);
    post_interrupt(dev, INTR_VEC_CMD_RING);

    return (err == 0) ? 0 : -EINVAL;
}