/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}
/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}
/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning. The ioc's are guaranteed to
	 * be different, use a different locking subclass here. Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}
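/*
 * A note on the trylock/back-off loop above: the usual locking order in
 * this file is queue_lock first, then ioc->lock (see ioc_create_icq()).
 * ioc_release_fn() already holds ioc->lock, so it may only try-lock the
 * queue_lock, and on failure it must drop ioc->lock and retry to avoid an
 * ABBA deadlock against paths that take the locks in the usual order.
 */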
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock. Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
				   &ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
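/*
 * Rough usage sketch for the refcounting pair above; the caller and the
 * surrounding context are illustrative, not taken from this file:
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	get_io_context(ioc);	// pin ioc while we keep a pointer to it
 *	...			// e.g. associate ioc with a request
 *	put_io_context(ioc);	// drop it; the last put frees the ioc
 */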
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active(). If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
	 * reverse double locking. Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}
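/*
 * Informal note: an io_context carries two counters. ->refcount (see
 * put_io_context() above) keeps the structure itself alive, while
 * ->active_ref tracks users that may still issue IO against it. When the
 * last active reference is dropped here, every icq is exited so the io
 * schedulers know no further IO will arrive, but the io_context itself is
 * only freed once ->refcount also drops to zero.
 */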
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);

	if (q->mq_ops) {
		spin_unlock_irq(q->queue_lock);
		__ioc_clear_queue(&icq_list);
	} else {
		__ioc_clear_queue(&icq_list);
		spin_unlock_irq(q->queue_lock);
	}
}
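/*
 * Note on the two branches above: a blk-mq icq can be destroyed with only
 * ioc->lock held, so queue_lock is dropped before walking the spliced list;
 * legacy elevators expect both locks, so for them the walk happens before
 * queue_lock is released (see the locking comments on ioc_exit_icq() and
 * ioc_destroy_icq()).
 */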
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install. ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting. Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files(). The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
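/*
 * Illustrative use of the helper above, e.g. when acting on another task's
 * io_context; error handling is elided and the ioprio assignment is only an
 * example of touching the returned ioc:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ioc) {
 *		ioc->ioprio = ioprio;	// example access, caller-specific
 *		put_io_context(ioc);	// drop the reference taken above
 *	}
 */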
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU. All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find a icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
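/*
 * Sketch of the lookup-or-create pattern an io scheduler might build on the
 * two helpers above (illustrative only, not code from this file):
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	if (!icq)
 *		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
 */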
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);