// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"
/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;
#ifdef CONFIG_BLK_ICQ
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
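
/*
 * Note that an io_context carries two counters: @refcount controls when
 * the structure itself may be freed (see put_io_context()), while
 * @active_ref counts tasks actively using the context and gates icq
 * exit (see exit_io_context()).
 */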
/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}
static void ioc_exit_icqs(struct io_context *ioc)
{
	struct io_cq *icq;

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
		ioc_exit_icq(icq);
	spin_unlock_irq(&ioc->lock);
}
/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(&q->queue_lock);

	if (icq->flags & ICQ_DESTROYED)
		return;

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	kfree_rcu(icq, __rcu_head);
}
/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}
/*
 * Releasing icqs requires reverse order double locking and we may already be
 * holding a queue_lock. Do it asynchronously from a workqueue.
 */
static bool ioc_delay_free(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	if (!hlist_empty(&ioc->icq_list)) {
		queue_work(system_power_efficient_wq, &ioc->release_work);
		spin_unlock_irqrestore(&ioc->lock, flags);
		return true;
	}
	spin_unlock_irqrestore(&ioc->lock, flags);
	return false;
}
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	spin_lock_irq(&q->queue_lock);
	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq =
			list_first_entry(&q->icq_list, struct io_cq, q_node);

		/*
		 * Other contexts won't hold the ioc lock while waiting
		 * for the queue_lock; see details in ioc_release_fn().
		 */
		spin_lock(&icq->ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&icq->ioc->lock);
	}
	spin_unlock_irq(&q->queue_lock);
}
#else /* CONFIG_BLK_ICQ */
static inline void ioc_exit_icqs(struct io_context *ioc)
{
}
static inline bool ioc_delay_free(struct io_context *ioc)
{
	return false;
}
#endif /* CONFIG_BLK_ICQ */
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);
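
/*
 * Typical pairing, shown for illustration only (the caller below is not
 * part of this file): an I/O scheduler takes an ioc reference through
 * ioc_find_get_icq() when setting up a request and drops it again with
 * put_io_context() once it is done with the icq:
 *
 *	struct io_cq *icq = ioc_find_get_icq(q);	// grabs an ioc ref
 *	...
 *	if (icq)
 *		put_io_context(icq->ioc);		// releases that ref
 */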
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->active_ref)) {
		ioc_exit_icqs(ioc);
		put_io_context(ioc);
	}
}
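
/*
 * Dropping the last active reference exits all icqs and drops the
 * task's reference, but the structure may outlive the task:
 * put_io_context() only frees it once @refcount also reaches zero,
 * e.g. after in-flight requests still holding a reference complete.
 */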
static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->active_ref, 1);
#ifdef CONFIG_BLK_ICQ
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
#endif
	ioc->ioprio = IOPRIO_DEFAULT;

	return ioc;
}
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	if (unlikely(!task->io_context)) {
		struct io_context *ioc;

		task_unlock(task);

		ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE);
		if (!ioc)
			return -ENOMEM;

		task_lock(task);
		if (task->flags & PF_EXITING) {
			kmem_cache_free(iocontext_cachep, ioc);
			goto out;
		}
		if (task->io_context)
			kmem_cache_free(iocontext_cachep, ioc);
		else
			task->io_context = ioc;
	}
	task->io_context->ioprio = ioprio;
out:
	task_unlock(task);
	return 0;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
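
/*
 * This is the backend of the ioprio_set(2) path in block/ioprio.c: a
 * caller may change another task's priority only if its uid matches the
 * target's, or with CAP_SYS_NICE, and the LSM hook can also veto.
 */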
int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;

	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		atomic_inc(&ioc->active_ref);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
		if (!tsk->io_context)
			return -ENOMEM;
		tsk->io_context->ioprio = ioc->ioprio;
	}

	return 0;
}
#ifdef CONFIG_BLK_ICQ
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the current task's io_context and
 * @q. Must be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU. All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
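
/*
 * The hint is purely an optimization: it may be stale or point at an
 * icq for a different queue, in which case the radix tree lookup above
 * is the authoritative fallback. That is why the racy hint assignment
 * is harmless.
 */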
/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure an io_cq linking the current task's io_context and @q
 * exists. If it doesn't, it is created with GFP_ATOMIC.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
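
/*
 * Losing the radix_tree_insert() race above is normal: another task
 * sharing this io_context (e.g. a CLONE_IO sibling) may have linked an
 * icq for @q first, in which case we free ours and return theirs via
 * ioc_lookup_icq().
 */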
struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq = NULL;

	if (unlikely(!ioc)) {
		ioc = alloc_io_context(GFP_ATOMIC, q->node);
		if (!ioc)
			return NULL;

		task_lock(current);
		if (current->io_context) {
			kmem_cache_free(iocontext_cachep, ioc);
			ioc = current->io_context;
		} else {
			current->io_context = ioc;
		}

		get_io_context(ioc);
		task_unlock(current);
	} else {
		get_io_context(ioc);
	}

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(q);
		if (!icq) {
			put_io_context(ioc);
			return NULL;
		}
	}
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);
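
/*
 * Sketch of how an elevator might consume this, for illustration only
 * (the hook shown is the elevator's, not this file's): a prepare_request
 * hook can do
 *
 *	rq->elv.icq = ioc_find_get_icq(rq->q);
 *
 * which creates both the io_context and the icq on first use and leaves
 * the caller holding an ioc reference to drop with put_io_context().
 */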
#endif /* CONFIG_BLK_ICQ */
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);