static int io_uring_mmap(int fd, struct io_uring_params *p,
                         struct io_uring_sq *sq, struct io_uring_cq *cq)
{
    size_t size;
    void *ptr;
    int ret;

    /* Map the SQ ring: sq_off gives the offsets of the kernel-owned
     * fields, and the index array of sq_entries unsigneds sits last. */
    sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
    ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
               MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
    if (ptr == MAP_FAILED)
        return -errno;
    sq->khead = ptr + p->sq_off.head;
    sq->ktail = ptr + p->sq_off.tail;
    sq->kring_mask = ptr + p->sq_off.ring_mask;
    sq->kring_entries = ptr + p->sq_off.ring_entries;
    sq->kflags = ptr + p->sq_off.flags;
    sq->kdropped = ptr + p->sq_off.dropped;
    sq->array = ptr + p->sq_off.array;

    /* The SQE array is a second, separate mapping. */
    size = p->sq_entries * sizeof(struct io_uring_sqe);
    sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_POPULATE, fd,
                    IORING_OFF_SQES);
    if (sq->sqes == MAP_FAILED) {
        ret = -errno;
err:
        munmap(sq->khead, sq->ring_sz);
        return ret;
    }

    /* Map the CQ ring; the CQE array itself starts at cq_off.cqes. */
    cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
    ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
               MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
    if (ptr == MAP_FAILED) {
        ret = -errno;
        munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
        goto err;
    }
    cq->khead = ptr + p->cq_off.head;
    cq->ktail = ptr + p->cq_off.tail;
    cq->kring_mask = ptr + p->cq_off.ring_mask;
    cq->kring_entries = ptr + p->cq_off.ring_entries;
    cq->koverflow = ptr + p->cq_off.overflow;
    cq->cqes = ptr + p->cq_off.cqes;
    return 0;
}
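With the rings mapped, the application reads and writes these kernel-shared indices directly; that is why khead, ktail, and kring_mask are stored as pointers into the shared pages rather than copied. As a rough sketch of how the CQ side is then consumed (reap_cqe is a hypothetical helper, not part of this file, and the load-acquire on *cq->ktail and store-release on *cq->khead that real code needs are elided for brevity):

static struct io_uring_cqe *reap_cqe(struct io_uring_cq *cq)
{
    unsigned head = *cq->khead;

    /* The kernel produces at ktail; we consume at khead. Equal means empty. */
    if (head == *cq->ktail)
        return NULL;
    return &cq->cqes[head & *cq->kring_mask];
}

After handling the returned CQE, the consumer advances *cq->khead by one so the kernel can reuse the slot.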
/*
 * For users that want to specify sq_thread_cpu or sq_thread_idle, this
 * interface is a convenient helper for mmap()ing the rings.
 * Returns -1 on error, or zero on success. On success, 'ring'
 * contains the necessary information to read/write to the rings.
 */
int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
{
    int ret;

    memset(ring, 0, sizeof(*ring));
    ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
    if (!ret)
        ring->ring_fd = fd;
    return ret;
}
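As the comment above says, this helper suits callers that fill in struct io_uring_params themselves before the setup call. A minimal sketch, assuming "liburing.h" provides the declarations and with QUEUE_DEPTH and setup_sqpoll_ring() invented for the example, might request an SQPOLL thread pinned to CPU 0 with a 2000 msec idle timeout:

#include <string.h>
#include "liburing.h"

#define QUEUE_DEPTH 64    /* assumed queue depth for this example */

static int setup_sqpoll_ring(struct io_uring *ring)
{
    struct io_uring_params p;
    int fd;

    memset(&p, 0, sizeof(p));
    p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
    p.sq_thread_cpu = 0;      /* honored because IORING_SETUP_SQ_AFF is set */
    p.sq_thread_idle = 2000;  /* poll thread may sleep after 2000 msec idle */

    fd = io_uring_setup(QUEUE_DEPTH, &p);
    if (fd < 0)
        return -1;
    return io_uring_queue_mmap(fd, &p, ring);
}

Note that SQPOLL has historically required elevated privileges, so a sketch like this may fail with EPERM for unprivileged callers.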
/*
 * Returns -1 on error, or zero on success. On success, 'ring'
 * contains the necessary information to read/write to the rings.
 */
int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
{
    struct io_uring_params p;
    int fd, ret;

    memset(&p, 0, sizeof(p));
    p.flags = flags;

    fd = io_uring_setup(entries, &p);
    if (fd < 0)
        return fd;

    ret = io_uring_queue_mmap(fd, &p, ring);
    if (ret)
        close(fd);

    return ret;
}
void io_uring_queue_exit(struct io_uring *ring)
{
    struct io_uring_sq *sq = &ring->sq;
    struct io_uring_cq *cq = &ring->cq;

    /* Undo the three mappings made in io_uring_mmap(), then drop the fd. */
    munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
    munmap(sq->khead, sq->ring_sz);
    munmap(cq->khead, cq->ring_sz);
    close(ring->ring_fd);
}
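A caller that is happy with default behavior only ever touches the last two entry points. A minimal sketch of the full lifecycle, assuming "liburing.h" provides the declarations and using a queue depth of 8:

#include <stdio.h>
#include "liburing.h"

int main(void)
{
    struct io_uring ring;

    if (io_uring_queue_init(8, &ring, 0) < 0) {
        perror("io_uring_queue_init");
        return 1;
    }

    /* ... fill SQEs, submit and reap via io_uring_enter() ... */

    io_uring_queue_exit(&ring);
    return 0;
}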