#include "../../include/uapi/linux/io_uring.h"
#include <linux/swab.h>

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;
	struct io_uring_sqe *sqes;

	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;
};

struct io_uring_cq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *koverflow;
	struct io_uring_cqe *cqes;

	size_t ring_sz;
};

struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};

/*
 * System calls
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);

/*
 * Library interface
 */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_peek_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);

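/*
 * Usage sketch (illustrative only, not part of the original header):
 * typical ring setup and teardown via the wrappers above. Error handling
 * is elided.
 *
 *	struct io_uring ring;
 *
 *	io_uring_queue_init(32, &ring, 0);	// setup + mmap, 32 entries
 *	...
 *	io_uring_queue_exit(&ring);		// unmap rings, close ring_fd
 */
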
/*
 * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
 * been processed by the application.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head, the kernel has
		 * the matching read barrier.
		 */
		write_barrier();
	}
}

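/*
 * Usage sketch (illustrative only, not part of the original header): the
 * intended wait/process/mark-seen cycle; handle_completion() stands in
 * for a hypothetical application callback.
 *
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_wait_cqe(&ring, &cqe);	// block until a cqe is posted
 *	handle_completion(cqe->res);	// consume the result first
 *	io_uring_cqe_seen(&ring, cqe);	// then advance the cq head
 */
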
/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	sqe->user_data = (unsigned long) data;
}

static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
	return (void *) (uintptr_t) cqe->user_data;
}

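/*
 * Usage sketch (illustrative only, not part of the original header):
 * user_data travels from sqe to cqe untouched, so a request pointer
 * tagged at submit time comes back with the completion; struct my_req
 * is hypothetical.
 *
 *	io_uring_sqe_set_data(sqe, req);			// at submit
 *	...
 *	struct my_req *req = io_uring_cqe_get_data(cqe);	// at completion
 */
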
static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    off_t offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = op;
	sqe->fd = fd;
	sqe->off = offset;
	sqe->addr = (unsigned long) addr;
	sqe->len = len;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}

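/*
 * Usage sketch (illustrative only, not part of the original header):
 * queue a single readv at offset 0; fd and iov are hypothetical caller
 * state.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	io_uring_submit(&ring);
 */
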
static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    off_t offset)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}

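/*
 * Usage sketch (illustrative only, not part of the original header):
 * *_FIXED ops use buffers registered with the kernel up front, and the
 * caller selects the registered buffer through sqe->buf_index.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *	io_uring_register(ring.ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *	io_uring_prep_read_fixed(sqe, fd, buf, 4096, 0);
 *	sqe->buf_index = 0;	// index into the registered buffer table
 */
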
static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  short poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
#if __BYTE_ORDER == __BIG_ENDIAN
	poll_mask = __swahw32(poll_mask);
#endif
	sqe->poll_events = poll_mask;
}

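/*
 * Usage sketch (illustrative only, not part of the original header):
 * one-shot poll for readability on a socket; the cqe res field carries
 * the returned event mask. conn is a hypothetical per-connection tag.
 *
 *	io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 *	io_uring_sqe_set_data(sqe, conn);
 *	io_uring_submit(&ring);
 */
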
static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     void *user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;
	sqe->addr = (unsigned long) user_data;
}

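/*
 * Usage sketch (illustrative only, not part of the original header):
 * removal matches on the user_data of the earlier POLL_ADD sqe.
 *
 *	io_uring_prep_poll_remove(sqe, conn);	// same tag as the poll add
 *	io_uring_submit(&ring);
 */
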
static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->fsync_flags = fsync_flags;
}

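/*
 * Usage sketch (illustrative only, not part of the original header): a
 * fsync_flags of 0 gives fsync() semantics, IORING_FSYNC_DATASYNC gives
 * fdatasync() semantics.
 *
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */
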
static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
}

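/*
 * Usage sketch (illustrative only, not part of the original header): NOP
 * completes without doing any I/O, which makes it handy for measuring
 * raw ring overhead.
 *
 *	io_uring_prep_nop(sqe);
 *	io_uring_submit(&ring);
 */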