// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_RSRC_H
#define IOU_RSRC_H

#define IO_NODE_ALLOC_CACHE_MAX 32

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)
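
/*
 * Sizing sketch, assuming the common 4K page size (PAGE_SHIFT == 12):
 * IO_RSRC_TAG_TABLE_SHIFT is then 9, so one tag table holds 1 << 9 == 512
 * u64 tags, and 512 * sizeof(u64) == 4096 bytes, i.e. each table fills
 * exactly one page. The tag array is therefore a two-level lookup:
 * idx >> 9 picks the table, idx & 511 picks the slot within it
 * (see io_get_tag_slot() below).
 */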

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

struct io_rsrc_put {
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	u16				rsrc_type;
	bool				quiesce;
};

struct io_rsrc_node {
	struct io_ring_ctx		*ctx;
	int				refs;
	bool				empty;
	u16				type;
	struct list_head		node;
	struct io_rsrc_put		item;
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[] __counted_by(nr_bvecs);
};

void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc);

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len);

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
void __io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
		     unsigned int size, unsigned int type);

static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	lockdep_assert_held(&ctx->uring_lock);

	if (node && !--node->refs)
		io_rsrc_node_ref_zero(node);
}
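
/*
 * Note on the reference scheme above: node->refs is a plain int rather than
 * an atomic, on the assumption that ctx->uring_lock serializes every get/put
 * (hence the lockdep assertion), so a bare pre-decrement is sufficient. Once
 * the count hits zero, io_rsrc_node_ref_zero() is expected to release the
 * resources hanging off the node.
 */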

static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx,
					struct io_rsrc_node *node)
{
	node->refs++;
}

static inline void __io_req_set_rsrc_node(struct io_kiocb *req,
					  struct io_ring_ctx *ctx)
{
	lockdep_assert_held(&ctx->uring_lock);
	req->rsrc_node = ctx->rsrc_node;
	io_charge_rsrc_node(ctx, ctx->rsrc_node);
}

static inline void io_req_set_rsrc_node(struct io_kiocb *req,
					struct io_ring_ctx *ctx,
					unsigned int issue_flags)
{
	if (!req->rsrc_node) {
		io_ring_submit_lock(ctx, issue_flags);
		__io_req_set_rsrc_node(req, ctx);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
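
/*
 * Usage sketch for the two helpers above: a request that touches registered
 * files or buffers pins the ring's current rsrc node the first time it needs
 * one, so the underlying resources cannot be torn down while the request is
 * in flight. io_req_set_rsrc_node() is the variant for issue paths that may
 * not already hold ctx->uring_lock; it leans on io_ring_submit_lock() /
 * io_ring_submit_unlock() honouring issue_flags before delegating to
 * __io_req_set_rsrc_node().
 */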

static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
{
	unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
	unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;

	return &data->tags[table_idx][off];
}
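
/*
 * Worked example for io_get_tag_slot(), assuming 4K pages (shift == 9,
 * mask == 511): idx == 1000 yields table_idx == 1000 >> 9 == 1 and
 * off == 1000 & 511 == 488, so the tag lives at data->tags[1][488].
 */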

static inline int io_rsrc_init(struct io_ring_ctx *ctx)
{
	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	return ctx->rsrc_node ? 0 : -ENOMEM;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

int __io_account_mem(struct user_struct *user, unsigned long nr_pages);

static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

#endif