/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0

struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[0];
}; /* 128 bytes + ring size */
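/*
 * The ring above is mapped shared and writable into the submitting process
 * (see aio_setup_ring()), and the aio_context_t returned by io_setup() is the
 * userspace address of that mapping.  A rough sketch of how userspace can
 * reap completions without a syscall, assuming a matching struct definition
 * on the user side, incompat_features == 0, and with memory barriers omitted:
 *
 *	struct aio_ring *r = (struct aio_ring *)ctx;	// ctx from io_setup()
 *	while (r->head != r->tail) {
 *		struct io_event ev = r->io_events[r->head];
 *		r->head = (r->head + 1) % r->nr;
 *		// ... consume ev ...
 *	}
 */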
#define AIO_RING_PAGES	8

struct kioctx_table {
	struct rcu_head	rcu;
	unsigned	nr;
	struct kioctx	*table[];
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct ctx_rq_wait {
	struct completion comp;
	atomic_t count;
};
struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct work_struct	free_work;

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};
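/*
 * Rough illustration of the two-level accounting above: with a ring of 128
 * events (ctx->nr_events == 128) and 4 possible CPUs, ioctx_alloc() sets
 * reqs_available to 127 and req_batch to 127 / (4 * 4) == 7, so each CPU
 * moves slots between its kioctx_cpu counter and the shared atomic_t seven
 * at a time (see get_reqs_available() and put_reqs_available() below).
 */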
/*
 * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
 * cancelled or completed (this makes a certain amount of sense because
 * successful cancellation - io_cancel() - does deliver the completion to
 * userspace).
 *
 * And since most things don't implement kiocb cancellation and we'd really like
 * kiocb completion to be lockless when possible, we use ki_cancel to
 * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
 * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
 */
#define KIOCB_CANCELLED		((void *) (~0ULL))
struct aio_kiocb {
	struct kiocb		common;

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
	__u64			ki_user_data;	/* user's data for completion */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct qstr this = QSTR_INIT("[aio]", 5);
	struct path path;
	struct file *file;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(aio_mnt);

	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
	if (IS_ERR(file)) {
		path_put(&path);
		return file;
	}

	file->f_flags = O_RDWR;
	return file;
}
static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname	= simple_dname,
	};
	return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
}
/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.mount		= aio_mount,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);
static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;

	if (aio_ring_file) {
		truncate_setsize(aio_ring_file->f_inode, 0);

		/* Prevent further access to the kioctx from migratepages */
		spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
		aio_ring_file->f_inode->i_mapping->private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);

		fput(aio_ring_file);
	}
}
static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;

		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}
static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = table->table[i];
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
	.mremap = aio_ring_remap,
};
#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc;

	/* mapping->private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = old->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old page hasn't already been changed */
		if (ctx->ring_pages[idx] != old)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto out_unlock;

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old page is copied to the new. This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	BUG_ON(ctx->ring_pages[idx] != old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old page is no longer accessible. */
	put_page(old);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->private_lock);
	return rc;
}
#endif
static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty = __set_page_dirty_no_writeback,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};
static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = find_or_create_page(file->f_inode->i_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	down_write(&mm->mmap_sem);
	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
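/*
 * Sketch of how these macros are used below: a logical event index is first
 * biased by AIO_EVENTS_OFFSET so that the struct aio_ring header at the start
 * of page 0 is skipped, then split into a page number and an offset within
 * that page:
 *
 *	pos   = tail + AIO_EVENTS_OFFSET;
 *	event = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE])
 *			+ pos % AIO_EVENTS_PER_PAGE;
 *
 * aio_complete() and aio_read_events_ring() both follow this pattern.
 */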
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common);
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
static int kiocb_cancel(struct aio_kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */

	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(&kiocb->common);
}
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, free_work);

	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}
static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	INIT_WORK(&ctx->free_work, free_ioctx);
	schedule_work(&ctx->free_work);
}
/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(req);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!table->table[i]) {
					ctx->id = i;
					table->table[i] = ctx;
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changes ring_pages by ->ring_lock.
					 */
					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
				new_nr, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}
static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}
/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}
/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != table->table[ctx->id]);
	table->table[ctx->id] = NULL;
	spin_unlock(&mm->ioctx_lock);

	/* percpu_ref_kill() will do the necessary call_rcu() */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}
/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;
	int i, skipped;

	if (!table)
		return;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	skipped = 0;
	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx = table->table[i];

		if (!ctx) {
			skipped++;
			continue;
		}

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &wait);
	}

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context is done. */
		wait_for_completion(&wait.comp);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}
static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}
/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
				  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}
/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() encounters an
 *	out-of-space condition in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay: whether we read the old version or
		 * the new one, either will be valid.  The important part is
		 * that head cannot pass tail since we prevent aio_complete()
		 * from updating tail by holding ctx->completion_lock.  Even
		 * if head is invalid, the check against ctx->completed_events
		 * below will make sure we do the safe/right thing.
		 */
		ring = kmap_atomic(ctx->ring_pages[0]);
		head = ring->head;
		kunmap_atomic(ring);

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}
/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	if (!get_reqs_available(ctx)) {
		user_refill_reqs_available(ctx);
		if (!get_reqs_available(ctx))
			return NULL;
	}

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	percpu_ref_get(&ctx->reqs);

	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}
*req
)
1013 if (req
->common
.ki_filp
)
1014 fput(req
->common
.ki_filp
);
1015 if (req
->ki_eventfd
!= NULL
)
1016 eventfd_ctx_put(req
->ki_eventfd
);
1017 kmem_cache_free(kiocb_cachep
, req
);
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring  = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	ctx = table->table[id];
	if (ctx && ctx->user_id == ctx_id) {
		percpu_ref_get(&ctx->users);
		ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}
/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct kiocb *kiocb, long res, long res2)
{
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common);
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head;
	unsigned long	flags;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	BUG_ON(is_sync_kiocb(kiocb));

	if (iocb->ki_list.next) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

	/* everything turned out well, dispose of the aiocb. */
	kiocb_free(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	percpu_ref_put(&ctx->reqs);
}
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched.
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ? tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			    ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}
static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}
static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	ktime_t until = { .tv64 = KTIME_MAX };
	long ret = 0;

	if (timeout) {
		struct timespec	ts;

		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			return -EFAULT;

		until = timespec_to_ktime(ts);
	}

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	if (until.tv64 == 0)
		aio_read_events(ctx, min_nr, nr, event, &ret);
	else
		wait_event_interruptible_hrtimeout(ctx->wait,
				aio_read_events(ctx, min_nr, nr, event, &ret),
				until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}
/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	or if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
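/*
 * Illustrative userspace call (a sketch; error handling omitted): the raw
 * syscall can be driven directly with the definitions from <linux/aio_abi.h>
 * and <sys/syscall.h>:
 *
 *	aio_context_t ctx = 0;			// must be zeroed before the call
 *	syscall(SYS_io_setup, 128, &ctx);	// room for at least 128 events
 *	...
 *	syscall(SYS_io_destroy, ctx);
 */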
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct ctx_rq_wait wait;
		int ret;

		init_completion(&wait.comp);
		atomic_set(&wait.count, 1);

		/* Pass requests_done to kill_ioctx() where it can be set
		 * in a thread-safe way. If we try to set it here then we have
		 * a race condition if two io_destroy() calls arrive
		 * simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &wait);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context is done. Otherwise the
		 * kernel keeps using user-space buffers even though the user
		 * thinks the context is destroyed.
		 */
		if (!ret)
			wait_for_completion(&wait.comp);

		return ret;
	}
	pr_debug("EINVAL: invalid context id\n");
	return -EINVAL;
}
typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *);

static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len,
				 struct iovec **iovec,
				 bool compat,
				 struct iov_iter *iter)
{
#ifdef CONFIG_COMPAT
	if (compat)
		return compat_import_iovec(rw,
				(struct compat_iovec __user *)buf,
				len, UIO_FASTIOV, iovec, iter);
#endif
	return import_iovec(rw, (struct iovec __user *)buf,
				len, UIO_FASTIOV, iovec, iter);
}
/* aio_run_iocb
 *	Performs the initial checks and io submission.
 */
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
			    char __user *buf, size_t len, bool compat)
{
	struct file *file = req->ki_filp;
	ssize_t ret;
	int rw;
	fmode_t mode;
	rw_iter_op *iter_op;
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;

	switch (opcode) {
	case IOCB_CMD_PREAD:
	case IOCB_CMD_PREADV:
		mode	= FMODE_READ;
		rw	= READ;
		iter_op	= file->f_op->read_iter;
		goto rw_common;

	case IOCB_CMD_PWRITE:
	case IOCB_CMD_PWRITEV:
		mode	= FMODE_WRITE;
		rw	= WRITE;
		iter_op	= file->f_op->write_iter;
		goto rw_common;
rw_common:
		if (unlikely(!(file->f_mode & mode)))
			return -EBADF;

		if (!iter_op)
			return -EINVAL;

		if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV)
			ret = aio_setup_vectored_rw(rw, buf, len,
						&iovec, compat, &iter);
		else {
			ret = import_single_range(rw, buf, len, iovec, &iter);
			iovec = NULL;
		}
		if (!ret)
			ret = rw_verify_area(rw, file, &req->ki_pos,
					     iov_iter_count(&iter));
		if (ret < 0) {
			kfree(iovec);
			return ret;
		}

		len = ret;

		if (rw == WRITE)
			file_start_write(file);

		ret = iter_op(req, &iter);

		if (rw == WRITE)
			file_end_write(file);
		kfree(iovec);
		break;

	case IOCB_CMD_FDSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 1);
		break;

	case IOCB_CMD_FSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 0);
		break;

	default:
		pr_debug("EINVAL: no operation provided\n");
		return -EINVAL;
	}

	if (ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND ||
			     ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(req, ret, 0);
	}

	return 0;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, bool compat)
{
	struct aio_kiocb *req;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	req->common.ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->common.ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}

	req->common.ki_pos = iocb->aio_offset;
	req->common.ki_complete = aio_complete;
	req->common.ki_flags = iocb_flags(req->common.ki_filp);

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}

		req->common.ki_flags |= IOCB_EVENTFD;
	}

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_user_iocb = user_iocb;
	req->ki_user_data = iocb->aio_data;

	ret = aio_run_iocb(&req->common, iocb->aio_lio_opcode,
			   (char __user *)(unsigned long)iocb->aio_buf,
			   iocb->aio_nbytes, compat);
	if (ret)
		goto out_put_req;

	return 0;
out_put_req:
	put_reqs_available(ctx, 1);
	percpu_ref_put(&ctx->reqs);
	kiocb_free(req);
	return ret;
}
long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i=0; i<nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}
/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, or if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
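/*
 * Illustrative userspace call (a sketch; error handling omitted): submitting
 * one IOCB_CMD_PREAD against a context "ctx" from io_setup(), a hypothetical
 * open descriptor "fd" and a buffer "buf" of "len" bytes:
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = len;
 *	cb.aio_offset     = 0;
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 */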
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}
/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct aio_kiocb *
lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key)
{
	struct aio_kiocb *kiocb;

	assert_spin_locked(&ctx->ctx_lock);

	if (key != KIOCB_KEY)
		return NULL;

	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_user_iocb == iocb)
			return kiocb;
	}
	return NULL;
}
/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if the aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(kiocb);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer. -EINPROGRESS indicates
		 * cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}
/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, or if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
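/*
 * Illustrative userspace call (a sketch): reap between 1 and 16 completions,
 * waiting at most one second; "ctx" is the handle returned by io_setup():
 *
 *	struct io_event events[16];
 *	struct timespec ts = { 1, 0 };
 *	int got = syscall(SYS_io_getevents, ctx, 1, 16, events, &ts);
 */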
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		percpu_ref_put(&ioctx->users);
	}

	return ret;
}