// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* ring mapped provided buffers, but mmap'ed by application */
	IOBL_MMAP	= 2,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC	= 4,
};

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	__u16 flags;

	atomic_t refs;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};
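
/*
 * Argument block handed to io_buffers_select()/io_buffers_peek(); the
 * KBUF_MODE_* flags above are passed in ->mode.
 */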
struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	unsigned short mode;
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);
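
/*
 * Ring provided buffers are "recycled" by simply clearing the selection
 * flags; since the ring head is only advanced on commit, the buffer stays
 * available for the next selection.
 */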
static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, that case we should increment bl->head
	 * to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}
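
/*
 * True if the request asked for a provided buffer (REQ_F_BUFFER_SELECT)
 * but has not picked one yet.
 */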
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
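
/*
 * Give a selected buffer back to its group so another request can use it,
 * unless recycling was explicitly disabled for this request.
 */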
static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

/* Mapped buffer ring, return io_uring_buf from head */
#define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]
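
/*
 * Commit consumption of ring provided buffers by advancing the ring head
 * past @nr entries. For incrementally consumed rings (IOBL_INC), @len bytes
 * are taken off the current entry instead; if it is only partially used,
 * the entry is shrunk in place, the head is left alone and false is
 * returned to indicate the request still owns the buffer.
 */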
static inline bool io_kbuf_commit(struct io_kiocb *req,
				  struct io_buffer_list *bl, int len, int nr)
{
	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
		return true;

	req->flags &= ~REQ_F_BUFFERS_COMMIT;

	if (unlikely(len < 0))
		return true;

	if (bl->flags & IOBL_INC) {
		struct io_uring_buf *buf;

		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
		if (WARN_ON_ONCE(len > buf->len))
			len = buf->len;
		buf->len -= len;
		if (buf->len) {
			buf->addr += len;
			return false;
		}
	}

	bl->head += nr;
	return true;
}
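
/*
 * Release the ring provided buffer attached to @req, committing @len bytes
 * across @nr ring entries. Returns false if the buffer was only partially
 * consumed and can be reused.
 */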
static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
	struct io_buffer_list *bl = req->buf_list;
	bool ret = true;

	if (bl) {
		ret = io_kbuf_commit(req, bl, len, nr);
		req->buf_index = bl->bgid;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}
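
/*
 * Return the buffer attached to @req: ring provided buffers are committed
 * back to their ring, legacy provided buffers are moved onto @list.
 */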
static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req, len, 1);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}
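
/*
 * Drop any selected buffer when a request is terminated, moving legacy
 * buffers to the ctx completion list. Caller must hold the completion lock.
 */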
static inline void io_kbuf_drop(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	/* len == 0 is fine here, non-ring will always drop all of it */
	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
}
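
/*
 * Release the selected buffer(s) and build the CQE flags that report the
 * buffer ID back to the application, adding IORING_CQE_F_BUF_MORE if a
 * ring buffer was only partially consumed.
 */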
static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
					  int nbufs, unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING) {
		if (!__io_put_kbuf_ring(req, len, nbufs))
			ret |= IORING_CQE_F_BUF_MORE;
	} else {
		__io_put_kbuf(req, len, issue_flags);
	}
	return ret;
}
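
/* Put a single selected buffer and return the CQE buffer flags for it */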
static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       unsigned issue_flags)
{
	return __io_put_kbufs(req, len, 1, issue_flags);
}
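
/* Put @nbufs selected buffers and return the CQE buffer flags for them */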
static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					int nbufs, unsigned issue_flags)
{
	return __io_put_kbufs(req, len, nbufs, issue_flags);
}

#endif