/*
 * QEMU paravirtual RDMA
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_ids.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "cpu.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "hw/rdma/rdma.h"

#include "../rdma_rm.h"
#include "../rdma_backend.h"
#include "../rdma_utils.h"

#include <infiniband/verbs.h>
#include "pvrdma.h"
#include "standard-headers/rdma/vmw_pvrdma-abi.h"
#include "sysemu/runstate.h"
#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h"
#include "pvrdma_qp_ops.h"
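/*
 * Illustrative command-line usage; a sketch based on the constraints
 * enforced in pvrdma_realize() (shared-memory backed RAM, vmxnet3 at
 * function 0 of the same slot). Option values such as the slot number
 * and the host device names are placeholders:
 *
 *   -object memory-backend-ram,id=mb1,size=1G,share=on
 *   -numa node,memdev=mb1
 *   -device vmxnet3,netdev=net0,addr=<slot>.0
 *   -device pvrdma,addr=<slot>.1,netdev=<eth dev>,ibdev=<ib dev>,ibport=1
 *
 * The qdev properties below are what these -device options set.
 */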
static Property pvrdma_dev_properties[] = {
    DEFINE_PROP_STRING("netdev", PVRDMADev, backend_eth_device_name),
    DEFINE_PROP_STRING("ibdev", PVRDMADev, backend_device_name),
    DEFINE_PROP_UINT8("ibport", PVRDMADev, backend_port_num, 1),
    DEFINE_PROP_UINT64("dev-caps-max-mr-size", PVRDMADev, dev_attr.max_mr_size,
                       MAX_MR_SIZE),
    DEFINE_PROP_INT32("dev-caps-max-qp", PVRDMADev, dev_attr.max_qp, MAX_QP),
    DEFINE_PROP_INT32("dev-caps-max-cq", PVRDMADev, dev_attr.max_cq, MAX_CQ),
    DEFINE_PROP_INT32("dev-caps-max-mr", PVRDMADev, dev_attr.max_mr, MAX_MR),
    DEFINE_PROP_INT32("dev-caps-max-pd", PVRDMADev, dev_attr.max_pd, MAX_PD),
    DEFINE_PROP_INT32("dev-caps-qp-rd-atom", PVRDMADev, dev_attr.max_qp_rd_atom,
                      MAX_QP_RD_ATOM),
    DEFINE_PROP_INT32("dev-caps-max-qp-init-rd-atom", PVRDMADev,
                      dev_attr.max_qp_init_rd_atom, MAX_QP_INIT_RD_ATOM),
    DEFINE_PROP_INT32("dev-caps-max-ah", PVRDMADev, dev_attr.max_ah, MAX_AH),
    DEFINE_PROP_INT32("dev-caps-max-srq", PVRDMADev, dev_attr.max_srq, MAX_SRQ),
    DEFINE_PROP_CHR("mad-chardev", PVRDMADev, mad_chr),
    DEFINE_PROP_END_OF_LIST(),
};
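/*
 * Statistics callback registered through RdmaProviderClass, reachable from
 * the HMP monitor ("info rdma"); the counters printed here are the ones
 * bumped in the register, UAR and interrupt paths below (dev->stats), plus
 * the shared resource-manager counters.
 */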
static void pvrdma_print_statistics(Monitor *mon, RdmaProvider *obj)
{
    PVRDMADev *dev = PVRDMA_DEV(obj);
    PCIDevice *pdev = PCI_DEVICE(dev);

    monitor_printf(mon, "%s, %x.%x\n", pdev->name, PCI_SLOT(pdev->devfn),
                   PCI_FUNC(pdev->devfn));
    monitor_printf(mon, "\tcommands         : %" PRId64 "\n",
                   dev->stats.commands);
    monitor_printf(mon, "\tregs_reads       : %" PRId64 "\n",
                   dev->stats.regs_reads);
    monitor_printf(mon, "\tregs_writes      : %" PRId64 "\n",
                   dev->stats.regs_writes);
    monitor_printf(mon, "\tuar_writes       : %" PRId64 "\n",
                   dev->stats.uar_writes);
    monitor_printf(mon, "\tinterrupts       : %" PRId64 "\n",
                   dev->stats.interrupts);
    rdma_dump_device_counters(mon, &dev->rdma_dev_res);
}
static void free_dev_ring(PCIDevice *pci_dev, PvrdmaRing *ring,
                          void *ring_state)
{
    pvrdma_ring_free(ring);
    rdma_pci_dma_unmap(pci_dev, ring_state, TARGET_PAGE_SIZE);
}
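/*
 * Device-ring layout, as implied by the mapping sequence below: the guest
 * hands us the DMA address of a one-page directory; its first entry points
 * to a page table, whose first entry in turn points to the ring-state page
 * (TX state first, RX state second), while the remaining table entries
 * point to the actual element pages.
 */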
static int init_dev_ring(PvrdmaRing *ring, PvrdmaRingState **ring_state,
                         const char *name, PCIDevice *pci_dev,
                         dma_addr_t dir_addr, uint32_t num_pages)
{
    uint64_t *dir, *tbl;
    int rc = 0;

    if (!num_pages) {
        rdma_error_report("Ring pages count must be strictly positive");
        return -EINVAL;
    }

    dir = rdma_pci_dma_map(pci_dev, dir_addr, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to page directory (ring %s)", name);
        rc = -ENOMEM;
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to page table (ring %s)", name);
        rc = -ENOMEM;
        goto out_free_dir;
    }

    *ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!*ring_state) {
        rdma_error_report("Failed to map to ring state (ring %s)", name);
        rc = -ENOMEM;
        goto out_free_tbl;
    }

    /* RX ring is the second */
    (*ring_state)++;
    rc = pvrdma_ring_init(ring, name, pci_dev,
                          (PvrdmaRingState *)*ring_state,
                          (num_pages - 1) * TARGET_PAGE_SIZE /
                          sizeof(struct pvrdma_cqne),
                          sizeof(struct pvrdma_cqne),
                          (dma_addr_t *)&tbl[1], (dma_addr_t)num_pages - 1);
    if (rc) {
        rc = -ENOMEM;
        goto out_free_ring_state;
    }

    goto out_free_tbl;

out_free_ring_state:
    rdma_pci_dma_unmap(pci_dev, *ring_state, TARGET_PAGE_SIZE);

out_free_tbl:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);

out_free_dir:
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

out:
    return rc;
}
static void free_dsr(PVRDMADev *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);

    if (!dev->dsr_info.dsr) {
        return;
    }

    free_dev_ring(pci_dev, &dev->dsr_info.async,
                  dev->dsr_info.async_ring_state);

    free_dev_ring(pci_dev, &dev->dsr_info.cq, dev->dsr_info.cq_ring_state);

    rdma_pci_dma_unmap(pci_dev, dev->dsr_info.req,
                       sizeof(union pvrdma_cmd_req));

    rdma_pci_dma_unmap(pci_dev, dev->dsr_info.rsp,
                       sizeof(union pvrdma_cmd_resp));

    rdma_pci_dma_unmap(pci_dev, dev->dsr_info.dsr,
                       sizeof(struct pvrdma_device_shared_region));

    dev->dsr_info.dsr = NULL;
}
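/*
 * The DSR (device shared region) is the root of guest/device communication:
 * the guest driver allocates it and programs its DMA address through the
 * DSRLOW/DSRHIGH registers; the device then maps the command slot, the
 * response slot and the two notification rings whose addresses the guest
 * published inside it.
 */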
static int load_dsr(PVRDMADev *dev)
{
    int rc = 0;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    DSRInfo *dsr_info;
    struct pvrdma_device_shared_region *dsr;

    free_dsr(dev);

    /* Map to DSR */
    dev->dsr_info.dsr = rdma_pci_dma_map(pci_dev, dev->dsr_info.dma,
                              sizeof(struct pvrdma_device_shared_region));
    if (!dev->dsr_info.dsr) {
        rdma_error_report("Failed to map to DSR");
        rc = -ENOMEM;
        goto out;
    }

    /* Shortcuts */
    dsr_info = &dev->dsr_info;
    dsr = dsr_info->dsr;

    /* Map to command slot */
    dsr_info->req = rdma_pci_dma_map(pci_dev, dsr->cmd_slot_dma,
                                     sizeof(union pvrdma_cmd_req));
    if (!dsr_info->req) {
        rdma_error_report("Failed to map to command slot address");
        rc = -ENOMEM;
        goto out_free_dsr;
    }

    /* Map to response slot */
    dsr_info->rsp = rdma_pci_dma_map(pci_dev, dsr->resp_slot_dma,
                                     sizeof(union pvrdma_cmd_resp));
    if (!dsr_info->rsp) {
        rdma_error_report("Failed to map to response slot address");
        rc = -ENOMEM;
        goto out_free_req;
    }

    /* Map to CQ notification ring */
    rc = init_dev_ring(&dsr_info->cq, &dsr_info->cq_ring_state, "dev_cq",
                       pci_dev, dsr->cq_ring_pages.pdir_dma,
                       dsr->cq_ring_pages.num_pages);
    if (rc) {
        rc = -ENOMEM;
        goto out_free_rsp;
    }

    /* Map to event notification ring */
    rc = init_dev_ring(&dsr_info->async, &dsr_info->async_ring_state,
                       "dev_async", pci_dev, dsr->async_ring_pages.pdir_dma,
                       dsr->async_ring_pages.num_pages);
    if (rc) {
        rc = -ENOMEM;
        goto out_free_rsp;
    }

    goto out;

out_free_rsp:
    rdma_pci_dma_unmap(pci_dev, dsr_info->rsp, sizeof(union pvrdma_cmd_resp));

out_free_req:
    rdma_pci_dma_unmap(pci_dev, dsr_info->req, sizeof(union pvrdma_cmd_req));

out_free_dsr:
    rdma_pci_dma_unmap(pci_dev, dsr_info->dsr,
                       sizeof(struct pvrdma_device_shared_region));
    dsr_info->dsr = NULL;

out:
    return rc;
}
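/*
 * Capability reporting: the values written into dsr->caps below are what
 * the guest pvrdma driver reads back as the device limits, so they must
 * stay consistent with the dev_attr values the backend was initialized
 * with (see pvrdma_realize() and init_dev_caps()).
 */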
static void init_dsr_dev_caps(PVRDMADev *dev)
{
    struct pvrdma_device_shared_region *dsr;

    if (dev->dsr_info.dsr == NULL) {
        rdma_error_report("Can't initialize DSR");
        return;
    }

    dsr = dev->dsr_info.dsr;
    dsr->caps.fw_ver = PVRDMA_FW_VERSION;
    dsr->caps.mode = PVRDMA_DEVICE_MODE_ROCE;
    dsr->caps.gid_types |= PVRDMA_GID_TYPE_FLAG_ROCE_V1;
    dsr->caps.max_uar = RDMA_BAR2_UAR_SIZE;
    dsr->caps.max_mr_size = dev->dev_attr.max_mr_size;
    dsr->caps.max_qp = dev->dev_attr.max_qp;
    dsr->caps.max_qp_wr = dev->dev_attr.max_qp_wr;
    dsr->caps.max_sge = dev->dev_attr.max_sge;
    dsr->caps.max_cq = dev->dev_attr.max_cq;
    dsr->caps.max_cqe = dev->dev_attr.max_cqe;
    dsr->caps.max_mr = dev->dev_attr.max_mr;
    dsr->caps.max_pd = dev->dev_attr.max_pd;
    dsr->caps.max_ah = dev->dev_attr.max_ah;
    dsr->caps.max_srq = dev->dev_attr.max_srq;
    dsr->caps.max_srq_wr = dev->dev_attr.max_srq_wr;
    dsr->caps.max_srq_sge = dev->dev_attr.max_srq_sge;
    dsr->caps.gid_tbl_len = MAX_GIDS;
    dsr->caps.sys_image_guid = 0;
    dsr->caps.node_guid = dev->node_guid;
    dsr->caps.phys_port_cnt = MAX_PORTS;
    dsr->caps.max_pkeys = MAX_PKEYS;
}
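/*
 * Interrupts: the device exposes RDMA_MAX_INTRS MSI-X vectors, with both
 * the vector table and the PBA living in the BAR 0 memory region
 * (&dev->msix serves as the container for both, at RDMA_MSIX_TABLE and
 * RDMA_MSIX_PBA respectively).
 */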
static void uninit_msix(PCIDevice *pdev, int used_vectors)
{
    PVRDMADev *dev = PVRDMA_DEV(pdev);
    int i;

    for (i = 0; i < used_vectors; i++) {
        msix_vector_unuse(pdev, i);
    }

    msix_uninit(pdev, &dev->msix, &dev->msix);
}
static int init_msix(PCIDevice *pdev)
{
    PVRDMADev *dev = PVRDMA_DEV(pdev);
    int i;
    int rc;

    rc = msix_init(pdev, RDMA_MAX_INTRS, &dev->msix, RDMA_MSIX_BAR_IDX,
                   RDMA_MSIX_TABLE, &dev->msix, RDMA_MSIX_BAR_IDX,
                   RDMA_MSIX_PBA, 0, NULL);
    if (rc < 0) {
        rdma_error_report("Failed to initialize MSI-X");
        return rc;
    }

    for (i = 0; i < RDMA_MAX_INTRS; i++) {
        rc = msix_vector_use(PCI_DEVICE(dev), i);
        if (rc < 0) {
            rdma_error_report("Failed to mark MSI-X vector %d as used", i);
            uninit_msix(pdev, i);
            return rc;
        }
    }

    return 0;
}
static void pvrdma_fini(PCIDevice *pdev)
{
    PVRDMADev *dev = PVRDMA_DEV(pdev);

    notifier_remove(&dev->shutdown_notifier);

    pvrdma_qp_ops_fini();

    rdma_backend_stop(&dev->backend_dev);

    rdma_rm_fini(&dev->rdma_dev_res, &dev->backend_dev,
                 dev->backend_eth_device_name);

    rdma_backend_fini(&dev->backend_dev);

    free_dsr(dev);

    if (msix_enabled(pdev)) {
        uninit_msix(pdev, RDMA_MAX_INTRS);
    }

    rdma_info_report("Device %s %x.%x is down", pdev->name,
                     PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
}
static void pvrdma_stop(PVRDMADev *dev)
{
    rdma_backend_stop(&dev->backend_dev);
}

static void pvrdma_start(PVRDMADev *dev)
{
    rdma_backend_start(&dev->backend_dev);
}

static void activate_device(PVRDMADev *dev)
{
    pvrdma_start(dev);
    set_reg_val(dev, PVRDMA_REG_ERR, 0);
}

static int unquiesce_device(PVRDMADev *dev)
{
    return 0;
}

static void reset_device(PVRDMADev *dev)
{
    pvrdma_stop(dev);
}
static uint64_t pvrdma_regs_read(void *opaque, hwaddr addr, unsigned size)
{
    PVRDMADev *dev = opaque;
    uint32_t val;

    dev->stats.regs_reads++;

    if (get_reg_val(dev, addr, &val)) {
        rdma_error_report("Failed to read REG value from address 0x%x",
                          (uint32_t)addr);
        return -EINVAL;
    }

    trace_pvrdma_regs_read(addr, val);

    return val;
}
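/*
 * Register-write protocol: the guest programs the 64-bit DSR DMA address
 * with two 32-bit writes (DSRLOW with the low half, then DSRHIGH with the
 * high half, which also triggers the mapping), and kicks command execution
 * by writing 0 to PVRDMA_REG_REQUEST after filling the command slot.
 */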
static void pvrdma_regs_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned size)
{
    PVRDMADev *dev = opaque;

    dev->stats.regs_writes++;

    if (set_reg_val(dev, addr, val)) {
        rdma_error_report("Failed to set REG value, addr=0x%"PRIx64
                          ", val=0x%"PRIx64, addr, val);
        return;
    }

    switch (addr) {
    case PVRDMA_REG_DSRLOW:
        trace_pvrdma_regs_write(addr, val, "DSRLOW", "");
        dev->dsr_info.dma = val;
        break;
    case PVRDMA_REG_DSRHIGH:
        trace_pvrdma_regs_write(addr, val, "DSRHIGH", "");
        dev->dsr_info.dma |= val << 32;
        load_dsr(dev);
        init_dsr_dev_caps(dev);
        break;
    case PVRDMA_REG_CTL:
        switch (val) {
        case PVRDMA_DEVICE_CTL_ACTIVATE:
            trace_pvrdma_regs_write(addr, val, "CTL", "ACTIVATE");
            activate_device(dev);
            break;
        case PVRDMA_DEVICE_CTL_UNQUIESCE:
            trace_pvrdma_regs_write(addr, val, "CTL", "UNQUIESCE");
            unquiesce_device(dev);
            break;
        case PVRDMA_DEVICE_CTL_RESET:
            trace_pvrdma_regs_write(addr, val, "CTL", "RESET");
            reset_device(dev);
            break;
        }
        break;
    case PVRDMA_REG_IMR:
        trace_pvrdma_regs_write(addr, val, "INTR_MASK", "");
        dev->interrupt_mask = val;
        break;
    case PVRDMA_REG_REQUEST:
        if (val == 0) {
            trace_pvrdma_regs_write(addr, val, "REQUEST", "");
            pvrdma_exec_cmd(dev);
        }
        break;
    default:
        break;
    }
}
static const MemoryRegionOps regs_ops = {
    .read = pvrdma_regs_read,
    .write = pvrdma_regs_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = sizeof(uint32_t),
        .max_access_size = sizeof(uint32_t),
    },
};
static uint64_t pvrdma_uar_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0xffffffff;
}
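/*
 * UAR doorbells: each user context is given its own UAR page, so only the
 * page offset (addr & 0xFFF) selects the doorbell type, while the written
 * value carries the QP/CQ/SRQ handle in PVRDMA_UAR_HANDLE_MASK plus the
 * operation flag bits tested below.
 */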
static void pvrdma_uar_write(void *opaque, hwaddr addr, uint64_t val,
                             unsigned size)
{
    PVRDMADev *dev = opaque;

    dev->stats.uar_writes++;

    switch (addr & 0xFFF) { /* Mask with 0xFFF as each UC gets page */
    case PVRDMA_UAR_QP_OFFSET:
        if (val & PVRDMA_UAR_QP_SEND) {
            trace_pvrdma_uar_write(addr, val, "QP", "SEND",
                                   val & PVRDMA_UAR_HANDLE_MASK, 0);
            pvrdma_qp_send(dev, val & PVRDMA_UAR_HANDLE_MASK);
        }
        if (val & PVRDMA_UAR_QP_RECV) {
            trace_pvrdma_uar_write(addr, val, "QP", "RECV",
                                   val & PVRDMA_UAR_HANDLE_MASK, 0);
            pvrdma_qp_recv(dev, val & PVRDMA_UAR_HANDLE_MASK);
        }
        break;
    case PVRDMA_UAR_CQ_OFFSET:
        if (val & PVRDMA_UAR_CQ_ARM) {
            trace_pvrdma_uar_write(addr, val, "CQ", "ARM",
                                   val & PVRDMA_UAR_HANDLE_MASK,
                                   !!(val & PVRDMA_UAR_CQ_ARM_SOL));
            rdma_rm_req_notify_cq(&dev->rdma_dev_res,
                                  val & PVRDMA_UAR_HANDLE_MASK,
                                  !!(val & PVRDMA_UAR_CQ_ARM_SOL));
        }
        if (val & PVRDMA_UAR_CQ_ARM_SOL) {
            trace_pvrdma_uar_write(addr, val, "CQ", "ARMSOL - not supported",
                                   0, 0);
        }
        if (val & PVRDMA_UAR_CQ_POLL) {
            trace_pvrdma_uar_write(addr, val, "CQ", "POLL",
                                   val & PVRDMA_UAR_HANDLE_MASK, 0);
            pvrdma_cq_poll(&dev->rdma_dev_res, val & PVRDMA_UAR_HANDLE_MASK);
        }
        break;
    case PVRDMA_UAR_SRQ_OFFSET:
        if (val & PVRDMA_UAR_SRQ_RECV) {
            trace_pvrdma_uar_write(addr, val, "SRQ", "RECV",
                                   val & PVRDMA_UAR_HANDLE_MASK, 0);
            pvrdma_srq_recv(dev, val & PVRDMA_UAR_HANDLE_MASK);
        }
        break;
    default:
        rdma_error_report("Unsupported command, addr=0x%"PRIx64
                          ", val=0x%"PRIx64, addr, val);
        break;
    }
}
static const MemoryRegionOps uar_ops = {
    .read = pvrdma_uar_read,
    .write = pvrdma_uar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = sizeof(uint32_t),
        .max_access_size = sizeof(uint32_t),
    },
};
static void init_pci_config(PCIDevice *pdev)
{
    pdev->config[PCI_INTERRUPT_PIN] = 1;
}
static void init_bars(PCIDevice *pdev)
{
    PVRDMADev *dev = PVRDMA_DEV(pdev);

    /* BAR 0 - MSI-X */
    memory_region_init(&dev->msix, OBJECT(dev), "pvrdma-msix",
                       RDMA_BAR0_MSIX_SIZE);
    pci_register_bar(pdev, RDMA_MSIX_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix);

    /* BAR 1 - Registers */
    memset(&dev->regs_data, 0, sizeof(dev->regs_data));
    memory_region_init_io(&dev->regs, OBJECT(dev), &regs_ops, dev,
                          "pvrdma-regs", sizeof(dev->regs_data));
    pci_register_bar(pdev, RDMA_REG_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->regs);

    /* BAR 2 - UAR */
    memset(&dev->uar_data, 0, sizeof(dev->uar_data));
    memory_region_init_io(&dev->uar, OBJECT(dev), &uar_ops, dev, "rdma-uar",
                          sizeof(dev->uar_data));
    pci_register_bar(pdev, RDMA_UAR_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->uar);
}
static void init_regs(PCIDevice *pdev)
{
    PVRDMADev *dev = PVRDMA_DEV(pdev);

    set_reg_val(dev, PVRDMA_REG_VERSION, PVRDMA_HW_VERSION);
    set_reg_val(dev, PVRDMA_REG_ERR, 0xFFFF);
}
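/*
 * Sizing sketch for the limits computed below: a one-page directory of
 * 64-bit entries addresses TARGET_PAGE_SIZE / sizeof(uint64_t) pages, so
 * with 4 KiB pages a ring spans at most 512 * 4096 = 2 MiB (pg_tbl_bytes),
 * and the element counts are derived from that budget.
 */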
static void init_dev_caps(PVRDMADev *dev)
{
    size_t pg_tbl_bytes = TARGET_PAGE_SIZE *
                          (TARGET_PAGE_SIZE / sizeof(uint64_t));
    size_t wr_sz = MAX(sizeof(struct pvrdma_sq_wqe_hdr),
                       sizeof(struct pvrdma_rq_wqe_hdr));

    dev->dev_attr.max_qp_wr = pg_tbl_bytes /
                              (wr_sz + sizeof(struct pvrdma_sge) *
                              dev->dev_attr.max_sge) - TARGET_PAGE_SIZE;
                              /* First page is ring state  ^^^^ */

    dev->dev_attr.max_cqe = pg_tbl_bytes / sizeof(struct pvrdma_cqe) -
                            TARGET_PAGE_SIZE; /* First page is ring state */

    dev->dev_attr.max_srq_wr = pg_tbl_bytes /
                               ((sizeof(struct pvrdma_rq_wqe_hdr) +
                               sizeof(struct pvrdma_sge)) *
                               dev->dev_attr.max_sge) - TARGET_PAGE_SIZE;
}
static int pvrdma_check_ram_shared(Object *obj, void *opaque)
{
    bool *shared = opaque;

    if (object_dynamic_cast(obj, "memory-backend-ram")) {
        *shared = object_property_get_bool(obj, "share", NULL);
    }

    return 0;
}
static void pvrdma_shutdown_notifier(Notifier *n, void *opaque)
{
    PVRDMADev *dev = container_of(n, PVRDMADev, shutdown_notifier);
    PCIDevice *pci_dev = PCI_DEVICE(dev);

    pvrdma_fini(pci_dev);
}
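/*
 * Realize-time preconditions checked below: the target and host page sizes
 * must match (the guest-physical ring math assumes it), function 0 of the
 * slot must be a vmxnet3 NIC (its MAC address seeds our node GUID), and
 * guest RAM must be backed by a shared memory object.
 */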
static void pvrdma_realize(PCIDevice *pdev, Error **errp)
{
    int rc = 0;
    PVRDMADev *dev = PVRDMA_DEV(pdev);
    Object *memdev_root;
    bool ram_shared = false;
    PCIDevice *func0;

    rdma_info_report("Initializing device %s %x.%x", pdev->name,
                     PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

    if (TARGET_PAGE_SIZE != qemu_real_host_page_size) {
        error_setg(errp, "Target page size must be the same as host page size");
        return;
    }

    func0 = pci_get_function_0(pdev);
    /* Break if not vmxnet3 device in slot 0 */
    if (strcmp(object_get_typename(OBJECT(func0)), TYPE_VMXNET3)) {
        error_setg(errp, "Device on %x.0 must be %s", PCI_SLOT(pdev->devfn),
                   TYPE_VMXNET3);
        return;
    }
    dev->func0 = VMXNET3(func0);

    addrconf_addr_eui48((unsigned char *)&dev->node_guid,
                        (const char *)&dev->func0->conf.macaddr.a);

    memdev_root = object_resolve_path("/objects", NULL);
    if (memdev_root) {
        object_child_foreach(memdev_root, pvrdma_check_ram_shared,
                             &ram_shared);
    }
    if (!ram_shared) {
        error_setg(errp, "Only shared memory backed ram is supported");
        return;
    }

    dev->dsr_info.dsr = NULL;

    init_pci_config(pdev);

    init_bars(pdev);

    init_regs(pdev);

    rc = init_msix(pdev);
    if (rc) {
        goto out;
    }

    rc = rdma_backend_init(&dev->backend_dev, pdev, &dev->rdma_dev_res,
                           dev->backend_device_name, dev->backend_port_num,
                           &dev->dev_attr, &dev->mad_chr);
    if (rc) {
        goto out;
    }

    init_dev_caps(dev);

    rc = rdma_rm_init(&dev->rdma_dev_res, &dev->dev_attr);
    if (rc) {
        goto out;
    }

    rc = pvrdma_qp_ops_init();
    if (rc) {
        goto out;
    }

    memset(&dev->stats, 0, sizeof(dev->stats));

    dev->shutdown_notifier.notify = pvrdma_shutdown_notifier;
    qemu_register_shutdown_notifier(&dev->shutdown_notifier);

#ifdef LEGACY_RDMA_REG_MR
    rdma_info_report("Using legacy reg_mr");
#else
    rdma_info_report("Using iova reg_mr");
#endif

out:
    if (rc) {
        error_append_hint(errp, "Device failed to load\n");
    }
}
static void pvrdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    RdmaProviderClass *ir = RDMA_PROVIDER_CLASS(klass);

    k->realize = pvrdma_realize;
    k->vendor_id = PCI_VENDOR_ID_VMWARE;
    k->device_id = PCI_DEVICE_ID_VMWARE_PVRDMA;
    k->revision = 0x00;
    k->class_id = PCI_CLASS_NETWORK_OTHER;

    dc->desc = "RDMA Device";
    device_class_set_props(dc, pvrdma_dev_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);

    ir->print_statistics = pvrdma_print_statistics;
}
static const TypeInfo pvrdma_info = {
    .name = PVRDMA_HW_NAME,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PVRDMADev),
    .class_init = pvrdma_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { INTERFACE_RDMA_PROVIDER },
        { }
    }
};

static void register_types(void)
{
    type_register_static(&pvrdma_info);
}

type_init(register_types)