/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = alloc_workqueue("aio", 0, 1);	/* used to limit concurrency */
	BUG_ON(!aio_wq);

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}
__initcall(aio_setup);
static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i = 0; i < info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		BUG_ON(ctx->mm != current->mm);
		vm_munmap(info->mmap_base, info->mmap_size);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
}
static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size, populate;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size,
					PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, 0,
					&populate);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}
	if (populate)
		mm_populate(info->mmap_base, populate);

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);

	return 0;
}
/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic().  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event) do {		\
	struct io_event *__event = (event);	\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
} while(0)
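/*
 * Worked example of the layout above (illustrative only, assuming 4 KB
 * pages and the usual 32-byte struct aio_ring header and 32-byte
 * struct io_event):
 *
 *	AIO_EVENTS_PER_PAGE   = 4096 / 32        = 128
 *	AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127
 *	AIO_EVENTS_OFFSET     = 128 - 127        = 1
 *
 * so event index nr maps to pos = nr + 1, i.e. page pos / 128 of
 * ring_pages[] at slot pos % 128: index 126 is the last event sharing
 * page 0 with the ring header, index 127 is the first event of page 1.
 */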
static void ctx_rcu_free(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	kmem_cache_free(kioctx_cachep, ctx);
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
	unsigned nr_events = ctx->max_reqs;
	BUG_ON(ctx->reqs_active);

	cancel_delayed_work_sync(&ctx->wq);

	spin_lock(&aio_nr_lock);
	BUG_ON(aio_nr - nr_events > aio_nr);
	aio_nr -= nr_events;
	spin_unlock(&aio_nr_lock);

	pr_debug("__put_ioctx: freeing %p\n", ctx);
	call_rcu(&ctx->rcu_head, ctx_rcu_free);
}
static inline int try_get_ioctx(struct kioctx *kioctx)
{
	return atomic_inc_not_zero(&kioctx->users);
}

static inline void put_ioctx(struct kioctx *kioctx)
{
	BUG_ON(atomic_read(&kioctx->users) <= 0);
	if (unlikely(atomic_dec_and_test(&kioctx->users)))
		__put_ioctx(kioctx);
}
/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 2);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > aio_max_nr ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		goto out_cleanup;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	err = -EAGAIN;
	aio_free_ring(ctx);
out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	dprintk("aio: error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}
/* kill_ctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void kill_ctx(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	struct io_event res;

	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);

		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (NULL != cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}

	if (!ctx->reqs_active)
		goto out;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		spin_unlock_irq(&ctx->ctx_lock);
		io_schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&ctx->ctx_lock);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);

out:
	spin_unlock_irq(&ctx->ctx_lock);
}
/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);
/* exit_aio: called when the last user of mm goes away.  At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context.  However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;

	while (!hlist_empty(&mm->ioctx_list)) {
		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
		hlist_del_rcu(&ctx->list);

		kill_ctx(ctx);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		/*
		 * We don't need to bother with munmap() here -
		 * exit_mmap(mm) is coming and it'll unmap everything.
		 * Since aio_free_ring() uses non-zero ->mmap_size
		 * as indicator that it needs to unmap the area,
		 * just set it to 0; aio_free_ring() is the only
		 * place that uses ->mmap_size, so it's safe.
		 * That way we get all munmap done to current->mm -
		 * all other callers have ctx->mm == current->mm.
		 */
		ctx->ring_info.mmap_size = 0;
		put_ioctx(ctx);
	}
}
/* __aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 *	of the kioctx so that the kioctx stays around until all requests are
 *	complete.  Returns NULL if no requests are free.
 *
 *	Returns with kiocb->users set to 2.  The io submit code path holds
 *	an extra reference while submitting the i/o.
 *	This prevents races between the aio code path referencing the
 *	req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);
	req->ki_eventfd = NULL;

	return req;
}
/*
 * struct kiocb's are allocated in batches to reduce the number of
 * times the ctx lock is acquired and released.
 */
#define KIOCB_BATCH_SIZE	32L

struct kiocb_batch {
	struct list_head head;
	long count; /* number of requests left to allocate */
};
static void kiocb_batch_init(struct kiocb_batch *batch, long total)
{
	INIT_LIST_HEAD(&batch->head);
	batch->count = total;
}
static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
{
	struct kiocb *req, *n;

	if (list_empty(&batch->head))
		return;

	spin_lock_irq(&ctx->ctx_lock);
	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
		list_del(&req->ki_batch);
		list_del(&req->ki_list);
		kmem_cache_free(kiocb_cachep, req);
		ctx->reqs_active--;
	}
	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
	spin_unlock_irq(&ctx->ctx_lock);
}
/*
 * Allocate a batch of kiocbs.  This avoids taking and dropping the
 * context lock a lot during setup.
 */
static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
{
	unsigned short allocated, to_alloc;
	long avail;
	struct kiocb *req, *n;
	struct aio_ring *ring;

	to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
	for (allocated = 0; allocated < to_alloc; allocated++) {
		req = __aio_get_req(ctx);
		if (!req)
			/* allocation failed, go with what we've got */
			break;
		list_add(&req->ki_batch, &batch->head);
	}

	if (allocated == 0)
		goto out;

	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);

	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
	BUG_ON(avail < 0);
	if (avail < allocated) {
		/* Trim back the number of requests. */
		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
			list_del(&req->ki_batch);
			kmem_cache_free(kiocb_cachep, req);
			if (--allocated <= avail)
				break;
		}
	}

	batch->count -= allocated;
	list_for_each_entry(req, &batch->head, ki_batch) {
		list_add(&req->ki_list, &ctx->active_reqs);
		ctx->reqs_active++;
	}

	kunmap_atomic(ring);
	spin_unlock_irq(&ctx->ctx_lock);

out:
	return allocated;
}
static inline struct kiocb *aio_get_req(struct kioctx *ctx,
					struct kiocb_batch *batch)
{
	struct kiocb *req;

	if (list_empty(&batch->head))
		if (kiocb_batch_refill(ctx, batch) == 0)
			return NULL;
	req = list_first_entry(&batch->head, struct kiocb, ki_batch);
	list_del(&req->ki_batch);
	return req;
}
static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	assert_spin_locked(&ctx->ctx_lock);

	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up_all(&ctx->wait);
}
/* __aio_put_req
 *	Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
		req, atomic_long_read(&req->ki_filp->f_count));

	assert_spin_locked(&ctx->ctx_lock);

	req->ki_users--;
	BUG_ON(req->ki_users < 0);
	if (likely(req->ki_users))
		return 0;
	list_del(&req->ki_list);		/* remove from active_reqs */
	req->ki_cancel = NULL;
	req->ki_retry = NULL;

	fput(req->ki_filp);
	req->ki_filp = NULL;
	really_put_req(ctx, req);
	return 1;
}
/* aio_put_req
 *	Returns true if this put was the last user of the kiocb,
 *	false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
	struct kioctx *ctx = req->ki_ctx;
	int ret;

	spin_lock_irq(&ctx->ctx_lock);
	ret = __aio_put_req(ctx, req);
	spin_unlock_irq(&ctx->ctx_lock);
	return ret;
}
EXPORT_SYMBOL(aio_put_req);
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
		/*
		 * RCU protects us against accessing freed memory but
		 * we have to be careful not to get a reference when the
		 * reference count already dropped to 0 (ctx->dead test
		 * is unreliable because of races).
		 */
		if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)) {
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx *ctx = iocb->ki_ctx;

	assert_spin_locked(&ctx->ctx_lock);

	if (list_empty(&iocb->ki_run_list)) {
		list_add_tail(&iocb->ki_run_list,
			&ctx->run_list);
		return 1;
	}
	return 0;
}
/* aio_run_iocb
 *	This is the core aio execution routine. It is
 *	invoked both for initial i/o submission and
 *	subsequent retries via the aio_kick_handler.
 *	Expects to be invoked with iocb->ki_ctx->lock
 *	already held. The lock is released and reacquired
 *	as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	ssize_t (*retry)(struct kiocb *);
	ssize_t ret;

	if (!(retry = iocb->ki_retry)) {
		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
		return 0;
	}

	/*
	 * We don't want the next retry iteration for this
	 * operation to start until this one has returned and
	 * updated the iocb state. However, wait_queue functions
	 * can trigger a kick_iocb from interrupt context in the
	 * meantime, indicating that data is available for the next
	 * iteration. We want to remember that and enable the
	 * next retry iteration _after_ we are through with
	 * this one.
	 *
	 * So, in order to be able to register a "kick", but
	 * prevent it from being queued now, we clear the kick
	 * flag, but make the kick code *think* that the iocb is
	 * still on the run list until we are actually done.
	 * When we are done with this iteration, we check if
	 * the iocb was kicked in the meantime and if so, queue
	 * it up afresh.
	 */

	kiocbClearKicked(iocb);

	/*
	 * This is so that aio_complete knows it doesn't need to
	 * pull the iocb off the run list (We can't just call
	 * INIT_LIST_HEAD because we don't want a kick_iocb to
	 * queue this on the run list yet)
	 */
	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	/* Quit retrying if the i/o has been cancelled */
	if (kiocbIsCancelled(iocb)) {
		ret = -EINTR;
		aio_complete(iocb, ret, 0);
		/* must not access the iocb after this */
		goto out;
	}

	/*
	 * Now we are all set to call the retry method in async
	 * context.
	 */
	ret = retry(iocb);

	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(iocb, ret, 0);
	}
out:
	spin_lock_irq(&ctx->ctx_lock);

	if (-EIOCBRETRY == ret) {
		/*
		 * OK, now that we are done with this iteration
		 * and know that there is more left to go,
		 * this is where we let go so that a subsequent
		 * "kick" can start the next iteration
		 */

		/* will make __queue_kicked_iocb succeed from here on */
		INIT_LIST_HEAD(&iocb->ki_run_list);
		/* we must queue the next iteration ourselves, if it
		 * has already been kicked */
		if (kiocbIsKicked(iocb)) {
			__queue_kicked_iocb(iocb);

			/*
			 * __queue_kicked_iocb will always return 1 here, because
			 * iocb->ki_run_list is empty at this point so it should
			 * be safe to unconditionally queue the context into the
			 * work queue.
			 */
			aio_queue_work(ctx);
		}
	}
	return ret;
}
/*
 * __aio_run_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
	struct kiocb *iocb;
	struct list_head run_list;

	assert_spin_locked(&ctx->ctx_lock);

	list_replace_init(&ctx->run_list, &run_list);
	while (!list_empty(&run_list)) {
		iocb = list_entry(run_list.next, struct kiocb,
				  ki_run_list);
		list_del(&iocb->ki_run_list);
		/*
		 * Hold an extra reference while retrying i/o.
		 */
		iocb->ki_users++;	/* grab extra reference */
		aio_run_iocb(iocb);
		__aio_put_req(ctx, iocb);
	}
	if (!list_empty(&ctx->run_list))
		return 1;
	return 0;
}
static void aio_queue_work(struct kioctx * ctx)
{
	unsigned long timeout;
	/*
	 * if someone is waiting, get the work started right
	 * away, otherwise, use a longer delay
	 */
	if (waitqueue_active(&ctx->wait))
		timeout = 1;
	else
		timeout = HZ/10;
	queue_delayed_work(aio_wq, &ctx->wq, timeout);
}
/*
 * aio_run_all_iocbs:
 *	Process all pending retries queued on the ioctx
 *	run list, and keep running them until the list
 *	stays empty.
 * Assumes it is operating within the aio issuer's mm context.
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->ctx_lock);
	while (__aio_run_iocbs(ctx))
		;
	spin_unlock_irq(&ctx->ctx_lock);
}
/*
 * aio_kick_handler:
 *	Work queue handler triggered to process pending
 *	retries on an ioctx. Takes on the aio issuer's
 *	mm context before running the iocbs, so that
 *	copy_xxx_user operates on the issuer's address
 *	space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
	mm_segment_t oldfs = get_fs();
	struct mm_struct *mm;
	int requeue;

	set_fs(USER_DS);
	use_mm(ctx->mm);
	spin_lock_irq(&ctx->ctx_lock);
	requeue = __aio_run_iocbs(ctx);
	mm = ctx->mm;
	spin_unlock_irq(&ctx->ctx_lock);
	unuse_mm(mm);
	set_fs(oldfs);
	/*
	 * we're in a worker thread already; no point using non-zero delay
	 */
	if (requeue)
		queue_delayed_work(aio_wq, &ctx->wq, 0);
}
/*
 * try_queue_kicked_iocb:
 *	Called by kick_iocb to queue the kiocb for retry
 *	and if required activate the aio work queue to process
 *	it.
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	unsigned long flags;
	int run = 0;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	/* set this inside the lock so that we can't race with aio_run_iocb()
	 * testing it and putting the iocb on the run list under the lock */
	if (!kiocbTryKick(iocb))
		run = __queue_kicked_iocb(iocb);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	if (run)
		aio_queue_work(ctx);
}
/*
 * kick_iocb:
 *	Called typically from a wait queue callback context
 *	to trigger a retry of the iocb.
 *	The retry is usually executed by aio workqueue
 *	threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
	/* sync iocbs are easy: they can only ever be executing from a
	 * single context. */
	if (is_sync_kiocb(iocb)) {
		kiocbSetKicked(iocb);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);
/* aio_complete
 *	Called when the io request on the given iocb is complete.
 *	Returns true if this is the last user of the request.  The
 *	only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring_info	*info;
	struct aio_ring	*ring;
	struct io_event	*event;
	unsigned long	flags;
	unsigned long	tail;
	int		ret;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(iocb->ki_users != 1);
		iocb->ki_user_data = res;
		iocb->ki_users = 0;
		wake_up_process(iocb->ki_obj.tsk);
		return 1;
	}

	info = &ctx->ring_info;

	/* add a completion event to the ring buffer.
	 * must be done holding ctx->ctx_lock to prevent
	 * other code from messing with the tail
	 * pointer since we might be called from irq
	 * context.
	 */
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
		list_del_init(&iocb->ki_run_list);

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0]);

	tail = info->tail;
	event = aio_ring_event(info, tail);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event);
	kunmap_atomic(ring);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	ret = __aio_put_req(ctx, iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(aio_complete);
/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0]);
	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
		 (unsigned long)ring->head, (unsigned long)ring->tail,
		 (unsigned long)ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring);
	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
		 (unsigned long)ring->head, (unsigned long)ring->tail);
	return ret;
}
struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}
static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;
	int			retry = 0;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!
	 */
	memset(&ent, 0, sizeof(ent));
retry:
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		dprintk("read event: %Lx %Lx %Lx %Lx\n",
			ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	/* racey check, but it gets redone */
	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
		retry = 1;
		aio_run_all_iocbs(ctx);
		goto retry;
	}

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(ctx->dead)) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 *  in flight */
			if (ctx->reqs_active)
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			dprintk("aio: lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}
/* Take an ioctx and remove it from the list of ioctx's.  Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
	struct mm_struct *mm = current->mm;
	int was_dead;

	/* delete the entry from the list if someone else hasn't already */
	spin_lock(&mm->ioctx_lock);
	was_dead = ioctx->dead;
	ioctx->dead = 1;
	hlist_del_rcu(&ioctx->list);
	spin_unlock(&mm->ioctx_lock);

	dprintk("aio_release(%p)\n", ioctx);
	if (likely(!was_dead))
		put_ioctx(ioctx);	/* twice for the list */

	kill_ctx(ioctx);

	/*
	 * Wake up any waiters.  The setting of ctx->dead must be seen
	 * by other CPUs at this point.  Right now, we rely on the
	 * locking done by the above calls to ensure this consistency.
	 */
	wake_up_all(&ioctx->wait);
}
/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			io_destroy(ioctx);
		put_ioctx(ioctx);
	}

out:
	return ret;
}
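/*
 * Illustrative userspace sketch (an assumption for documentation, not part
 * of this file and never compiled with the kernel): creating and tearing
 * down an AIO context with the raw syscalls documented above.
 */
#if 0
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static long sys_io_setup(unsigned nr_events, aio_context_t *ctxp)
{
	return syscall(__NR_io_setup, nr_events, ctxp);
}

static long sys_io_destroy(aio_context_t ctx)
{
	return syscall(__NR_io_destroy, ctx);
}

int main(void)
{
	aio_context_t ctx = 0;	/* must be zeroed before io_setup() */

	if (sys_io_setup(128, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}
	printf("got aio context %#llx\n", (unsigned long long)ctx);
	sys_io_destroy(ctx);
	return 0;
}
#endif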
/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		io_destroy(ioctx);
		put_ioctx(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}
static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	/* If the end of the current iovec was reached,
	 * advance to the next one */
	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}
static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as it's a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error. */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}
static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}
static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(type,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	else
#endif
		ret = rw_copy_check_uvector(type,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	if (ret < 0)
		goto out;

	ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}
static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb)
{
	int bytes;

	bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
	if (bytes < 0)
		return bytes;

	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = bytes;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}
/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = aio_setup_single_vector(READ, file, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = aio_setup_single_vector(WRITE, file, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		dprintk("EINVAL: io_submit: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct kiocb_batch *batch,
			 bool compat)
{
	struct kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: io_submit: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	file = fget(iocb->aio_fildes);
	if (unlikely(!file))
		return -EBADF;

	req = aio_get_req(ctx, batch);	/* returns with 2 references to req */
	if (unlikely(!req)) {
		fput(file);
		return -EAGAIN;
	}
	req->ki_filp = file;
	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		dprintk("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);
	if (ret)
		goto out_put_req;

	spin_lock_irq(&ctx->ctx_lock);
	/*
	 * We could have raced with io_destroy() and are currently holding a
	 * reference to ctx which should be destroyed. We cannot submit IO
	 * since ctx gets freed as soon as io_submit() puts its reference.  The
	 * check here is reliable: io_destroy() sets ctx->dead before waiting
	 * for outstanding IO and the barrier between these two is realized by
	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
	 * increment ctx->reqs_active before checking for ctx->dead and the
	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
	 * don't see ctx->dead set here, io_destroy() waits for our IO to
	 * finish.
	 */
	if (ctx->dead) {
		spin_unlock_irq(&ctx->ctx_lock);
		ret = -EINVAL;
		goto out_put_req;
	}
	aio_run_iocb(req);
	if (!list_empty(&ctx->run_list)) {
		/* drain the run list */
		while (__aio_run_iocbs(ctx))
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}
long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;
	struct kiocb_batch batch;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: io_submit: invalid context id\n");
		return -EINVAL;
	}

	kiocb_batch_init(&batch, nr);

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	kiocb_batch_free(ctx, &batch);
	put_ioctx(ctx);
	return i ? i : ret;
}
/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}
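/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * filling in one struct iocb for IOCB_CMD_PREAD and submitting it, in the
 * shape io_submit_one() above expects (reserved fields zeroed, a readable
 * fd, aio_buf/aio_nbytes that pass the overflow checks).  "ctx" and "fd"
 * are assumed to come from io_setup() and open().
 */
#if 0
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static long submit_one_pread(aio_context_t ctx, int fd, void *buf,
			     size_t len, long long off)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };

	memset(&cb, 0, sizeof(cb));	/* keeps aio_reserved1/2 zero */
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf = (unsigned long)buf;
	cb.aio_nbytes = len;
	cb.aio_offset = off;
	cb.aio_data = 42;		/* echoed back in io_event.data */

	/* returns the number of iocbs queued, 1 on success here */
	return syscall(__NR_io_submit, ctx, 1, cbs);
}
#endif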
/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}
/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	int (*cancel)(struct kiocb *iocb, struct io_event *res);
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	ret = -EAGAIN;
	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb && kiocb->ki_cancel) {
		cancel = kiocb->ki_cancel;
		kiocb->ki_users++;
		kiocbSetCancelled(kiocb);
	} else
		cancel = NULL;
	spin_unlock_irq(&ctx->ctx_lock);

	if (NULL != cancel) {
		struct io_event tmp;
		pr_debug("calling cancel\n");
		memset(&tmp, 0, sizeof(tmp));
		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
		tmp.data = kiocb->ki_user_data;
		ret = cancel(kiocb, &tmp);
		if (!ret) {
			/* Cancellation succeeded -- copy the result
			 * into the user's buffer.
			 */
			if (copy_to_user(result, &tmp, sizeof(tmp)))
				ret = -EFAULT;
		}
	} else
		ret = -EINVAL;

	put_ioctx(ctx);

	return ret;
}
/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative and will be updated if not NULL and the
 *	operation blocks. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}

	asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
	return ret;
}
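/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * reaping completions with io_getevents() as documented above, waiting for
 * at least one and at most 64 events with a one second relative timeout.
 * io_event.res carries the byte count or a negative errno; io_event.data
 * is whatever was stored in iocb->aio_data at submit time.
 */
#if 0
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <time.h>

static void reap_events(aio_context_t ctx)
{
	struct io_event events[64];
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	long i, n;

	n = syscall(__NR_io_getevents, ctx, 1, 64, events, &ts);
	for (i = 0; i < n; i++)
		printf("iocb %#llx: data=%#llx res=%lld\n",
		       (unsigned long long)events[i].obj,
		       (unsigned long long)events[i].data,
		       (long long)events[i].res);
}
#endif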