/*
 * QEMU paravirtual RDMA - Device rings
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "hw/pci/pci.h"
#include "cpu.h"

#include "trace.h"

#include "../rdma_utils.h"
#include "pvrdma_dev_ring.h"

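/*
 * Initialize a device ring: map the guest pages listed in tbl into host
 * memory and record the ring geometry (max_elems entries of elem_sz bytes
 * spread over npages pages).
 */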
int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
                     PvrdmaRingState *ring_state, uint32_t max_elems,
                     size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
{
    int i;
    int rc = 0;

    pstrcpy(ring->name, MAX_RING_NAME_SZ, name);
    ring->dev = dev;
    ring->ring_state = ring_state;
    ring->max_elems = max_elems;
    ring->elem_sz = elem_sz;
    /* TODO: Give a moment to think if we want to redo driver settings
    qatomic_set(&ring->ring_state->prod_tail, 0);
    qatomic_set(&ring->ring_state->cons_head, 0);
    */
    ring->npages = npages;
    ring->pages = g_malloc0(npages * sizeof(void *));

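    /* Map each guest page of the ring into the host address space */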
    for (i = 0; i < npages; i++) {
        if (!tbl[i]) {
            rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i);
            continue;
        }

        ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
        if (!ring->pages[i]) {
            rc = -ENOMEM;
            rdma_error_report("Failed to map to page %d in ring %s", i, name);
            goto out_free;
        }
        memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
    }

    goto out;

out_free:
    while (i--) {
        rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
    }
    g_free(ring->pages);

out:
    return rc;
}

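/*
 * Return a pointer to the element at the consumer head, or NULL if the ring
 * is empty or its indices are out of range. The indices wrap at twice the
 * ring size so that a full ring can be distinguished from an empty one.
 */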
void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
{
    unsigned int idx, offset;
    const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
    const uint32_t head = qatomic_read(&ring->ring_state->cons_head);

    if (tail & ~((ring->max_elems << 1) - 1) ||
        head & ~((ring->max_elems << 1) - 1) ||
        tail == head) {
        trace_pvrdma_ring_next_elem_read_no_data(ring->name);
        return NULL;
    }

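    /* Translate the element index into a page and an offset within it */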
    idx = head & (ring->max_elems - 1);
    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}

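/* Advance the consumer head once the element just read has been handled */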
void pvrdma_ring_read_inc(PvrdmaRing *ring)
{
    uint32_t idx = qatomic_read(&ring->ring_state->cons_head);

    idx = (idx + 1) & ((ring->max_elems << 1) - 1);
    qatomic_set(&ring->ring_state->cons_head, idx);
}

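/*
 * Return a pointer to the free slot at the producer tail, or NULL if the
 * ring is full (tail is exactly max_elems ahead of head) or its indices are
 * out of range.
 */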
void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
{
    unsigned int idx, offset;
    const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
    const uint32_t head = qatomic_read(&ring->ring_state->cons_head);

    if (tail & ~((ring->max_elems << 1) - 1) ||
        head & ~((ring->max_elems << 1) - 1) ||
        tail == (head ^ ring->max_elems)) {
        rdma_error_report("CQ is full");
        return NULL;
    }

    idx = tail & (ring->max_elems - 1);
    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}

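/* Publish the element just written by advancing the producer tail */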
void pvrdma_ring_write_inc(PvrdmaRing *ring)
{
    uint32_t idx = qatomic_read(&ring->ring_state->prod_tail);

    idx = (idx + 1) & ((ring->max_elems << 1) - 1);
    qatomic_set(&ring->ring_state->prod_tail, idx);
}

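/* Unmap all ring pages and release the page array */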
void pvrdma_ring_free(PvrdmaRing *ring)
{
    if (!ring) {
        return;
    }

    if (!ring->pages) {
        return;
    }

    while (ring->npages--) {
        rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
                           TARGET_PAGE_SIZE);
    }

    g_free(ring->pages);
    ring->pages = NULL;
}