/* tools/io_uring/setup.c */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include "liburing.h"
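
/*
 * Map the SQ ring, the SQE array, and the CQ ring into this process,
 * and resolve the pointers into them from the offsets the kernel filled
 * in via 'p'.  Returns 0 on success, -errno on failure.
 */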
static int io_uring_mmap(int fd, struct io_uring_params *p,
			 struct io_uring_sq *sq, struct io_uring_cq *cq)
{
	size_t size;
	void *ptr;
	int ret;
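
	/* SQ ring: head/tail, flags, dropped count, and the SQE index array */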
	sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
	if (ptr == MAP_FAILED)
		return -errno;
	sq->khead = ptr + p->sq_off.head;
	sq->ktail = ptr + p->sq_off.tail;
	sq->kring_mask = ptr + p->sq_off.ring_mask;
	sq->kring_entries = ptr + p->sq_off.ring_entries;
	sq->kflags = ptr + p->sq_off.flags;
	sq->kdropped = ptr + p->sq_off.dropped;
	sq->array = ptr + p->sq_off.array;
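
	/* The SQEs live in their own mapping, separate from the SQ ring */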
	size = p->sq_entries * sizeof(struct io_uring_sqe);
	sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd,
				IORING_OFF_SQES);
	if (sq->sqes == MAP_FAILED) {
		ret = -errno;
err:
		munmap(sq->khead, sq->ring_sz);
		return ret;
	}
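
	/* CQ ring: head/tail/overflow bookkeeping plus the CQEs themselves */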
	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
	ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
	if (ptr == MAP_FAILED) {
		ret = -errno;
		munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
		goto err;
	}
	cq->khead = ptr + p->cq_off.head;
	cq->ktail = ptr + p->cq_off.tail;
	cq->kring_mask = ptr + p->cq_off.ring_mask;
	cq->kring_entries = ptr + p->cq_off.ring_entries;
	cq->koverflow = ptr + p->cq_off.overflow;
	cq->cqes = ptr + p->cq_off.cqes;
	return 0;
}

/*
 * For users that want to specify sq_thread_cpu or sq_thread_idle, this
 * interface is a convenient helper for mmap()ing the rings.
 * Returns a negative error value on error, or zero on success.  On
 * success, 'ring' contains the necessary information to read/write to
 * the rings.
 */
int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
{
	int ret;

	memset(ring, 0, sizeof(*ring));
	ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
	if (!ret)
		ring->ring_fd = fd;
	return ret;
}
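
/*
 * Illustrative sketch (not part of the original file): how a caller
 * might pair a hand-rolled io_uring_setup() call with
 * io_uring_queue_mmap() to request an SQPOLL kernel thread.  The flag
 * and field names come from the io_uring uapi; the function itself and
 * the idle value are hypothetical.
 */
static int example_sqpoll_init(unsigned entries, struct io_uring *ring)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;	/* kernel thread polls the SQ ring */
	p.sq_thread_idle = 2000;	/* idle the thread after 2000 msec */

	fd = io_uring_setup(entries, &p);
	if (fd < 0)
		return fd;

	return io_uring_queue_mmap(fd, &p, ring);
}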

/*
 * Returns a negative value on error, or zero on success.  On success,
 * 'ring' contains the necessary information to read/write to the rings.
 */
int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
{
	struct io_uring_params p;
	int fd, ret;

	memset(&p, 0, sizeof(p));
	p.flags = flags;

	fd = io_uring_setup(entries, &p);
	if (fd < 0)
		return fd;

	ret = io_uring_queue_mmap(fd, &p, ring);
	if (ret)
		close(fd);

	return ret;
}
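
/*
 * Tear down the ring: the SQE array must be unmapped before the SQ
 * ring, since *sq->kring_entries is read from the SQ ring mapping.
 */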
void io_uring_queue_exit(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	struct io_uring_cq *cq = &ring->cq;

	munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
	munmap(sq->khead, sq->ring_sz);
	munmap(cq->khead, cq->ring_sz);
	close(ring->ring_fd);
}
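
/*
 * Illustrative usage sketch (not part of the original file): submit one
 * NOP and reap its completion.  Assumes the helpers declared next to
 * this file in liburing.h (io_uring_get_sqe, io_uring_prep_nop,
 * io_uring_submit, io_uring_wait_cqe); the function name is
 * hypothetical.
 */
static int example_nop_roundtrip(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe) {
		ret = -EAGAIN;	/* SQ ring full */
		goto out;
	}
	io_uring_prep_nop(sqe);

	ret = io_uring_submit(&ring);
	if (ret < 0)
		goto out;

	ret = io_uring_wait_cqe(&ring, &cqe);
out:
	io_uring_queue_exit(&ring);
	return ret;
}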