/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise	<bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0

struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[0];
}; /* 128 bytes + ring size */
#define AIO_RING_PAGES	8

struct kioctx_table {
	unsigned		nr;
	struct kioctx __rcu	*table[];
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct ctx_rq_wait {
	struct completion comp;
	atomic_t count;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_head		free_rcu;
	struct work_struct	free_work;	/* see free_ioctx() */

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};
/*
 * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
 * cancelled or completed (this makes a certain amount of sense because
 * successful cancellation - io_cancel() - does deliver the completion to
 * userspace).
 *
 * And since most things don't implement kiocb cancellation and we'd really like
 * kiocb completion to be lockless when possible, we use ki_cancel to
 * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
 * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
 */
#define KIOCB_CANCELLED		((void *) (~0ULL))

struct aio_kiocb {
	struct kiocb		common;

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
	__u64			ki_user_data;	/* user's data for completion */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct qstr this = QSTR_INIT("[aio]", 5);
	struct path path;
	struct file *file;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(aio_mnt);

	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
	if (IS_ERR(file)) {
		path_put(&path);
		return file;
	}

	file->f_flags = O_RDWR;
	return file;
}
static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname	= simple_dname,
	};
	struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
					   AIO_RING_MAGIC);

	if (!IS_ERR(root))
		root->d_sb->s_iflags |= SB_I_NOEXEC;
	return root;
}
/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.mount		= aio_mount,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);
static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	if (aio_ring_file) {
		truncate_setsize(file_inode(aio_ring_file), 0);

		/* Prevent further access to the kioctx from migratepages */
		i_mapping = aio_ring_file->f_mapping;
		spin_lock(&i_mapping->private_lock);
		i_mapping->private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&i_mapping->private_lock);

		fput(aio_ring_file);
	}
}
static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
				page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}
static int aio_ring_mremap(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = rcu_dereference(table->table[i]);
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}
static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
#endif
};

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_ops = &aio_ring_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};
#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc;

	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the ctx->completion_lock. That does not work with the
	 * migration workflow of MIGRATE_SYNC_NO_COPY.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	rc = 0;

	/* mapping->private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = old->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old page hasn't already been changed */
		if (ctx->ring_pages[idx] != old)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto out_unlock;

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old page is copied to the new. This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	BUG_ON(ctx->ring_pages[idx] != old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old page is no longer accessible. */
	put_page(old);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->private_lock);
	return rc;
}
#endif

static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty	= __set_page_dirty_no_writeback,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};
static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
	struct aio_ring *ring;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (down_write_killable(&mm->mmap_sem)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EINTR;
	}

	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused, NULL);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
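/*
 * Worked example (a sketch added for illustration, assuming 4 KiB pages and
 * the 32-byte struct io_event / 32-byte struct aio_ring header above):
 * AIO_EVENTS_PER_PAGE == 128, AIO_EVENTS_FIRST_PAGE == 127 and
 * AIO_EVENTS_OFFSET == 1.  Event slot i then lives at absolute position
 * pos = i + AIO_EVENTS_OFFSET, i.e. in ring page pos / AIO_EVENTS_PER_PAGE
 * at index pos % AIO_EVENTS_PER_PAGE, which is how aio_complete() and
 * aio_read_events_ring() below address the ring pages.
 */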
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common);
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
static int kiocb_cancel(struct aio_kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */

	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(&kiocb->common);
}
/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring(), so the double bouncing through kioctx->free_rcu and
 * kioctx->free_work is necessary.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, free_work);

	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}
static void free_ioctx_rcufn(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);

	INIT_WORK(&ctx->free_work, free_ioctx);
	schedule_work(&ctx->free_work);
}
static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
}
/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(req);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	for (i = 0; i < table->nr; i++)
		if (!rcu_access_pointer(table->table[i])) {
			ctx->id = i;
			rcu_assign_pointer(table->table[i], ctx);
			spin_unlock(&mm->ioctx_lock);

			/* While kioctx setup is in progress,
			 * we are protected from page migration
			 * changes ring_pages by ->ring_lock.
			 */
			ring = kmap_atomic(ctx->ring_pages[0]);
			ring->id = ctx->id;
			kunmap_atomic(ring);
			return 0;
		}

	new_nr = (table ? table->nr : 1) * 4;
	spin_unlock(&mm->ioctx_lock);

	table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
			new_nr, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	table->nr = new_nr;

	spin_lock(&mm->ioctx_lock);
	old = rcu_dereference_raw(mm->ioctx_table);

	if (!old) {
		rcu_assign_pointer(mm->ioctx_table, table);
	} else if (table->nr > old->nr) {
		memcpy(table->table, old->table,
		       old->nr * sizeof(struct kioctx *));
		rcu_assign_pointer(mm->ioctx_table, table);
	}
}
static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}
/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	 */
	unsigned int max_reqs = nr_events;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = max_reqs;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx, nr_events);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}
/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 *  could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}
/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;
	int i, skipped;

	if (!table)
		return;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	skipped = 0;
	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx =
			rcu_dereference_protected(table->table[i], true);

		if (!ctx) {
			skipped++;
			continue;
		}

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &wait);
	}

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
		wait_for_completion(&wait.comp);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
}
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}
static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}
/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
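/*
 * Worked example (a sketch added for illustration): with ctx->nr_events == 128,
 * head == 120 and tail == 10 the ring has wrapped, so
 * events_in_ring == 128 - (120 - 10) == 18; only the completed_events in
 * excess of those 18 still-pending events correspond to slots userspace has
 * already consumed, and only they are handed back via put_reqs_available().
 */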
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
				  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}
/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() runs out of space
 *	in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay since we may read either the old
		 * version or the new version, and either will be valid.  The
		 * important part is that head cannot pass tail since we
		 * prevent aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 * safe/right thing.
		 */
		ring = kmap_atomic(ctx->ring_pages[0]);
		head = ring->head;
		kunmap_atomic(ring);

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}
/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	if (!get_reqs_available(ctx)) {
		user_refill_reqs_available(ctx);
		if (!get_reqs_available(ctx))
			return NULL;
	}

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	percpu_ref_get(&ctx->reqs);

	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}
static void kiocb_free(struct aio_kiocb *req)
{
	if (req->common.ki_filp)
		fput(req->common.ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	kmem_cache_free(kiocb_cachep, req);
}
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring  = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	id = array_index_nospec(id, table->nr);
	ctx = rcu_dereference(table->table[id]);
	if (ctx && ctx->user_id == ctx_id) {
		if (percpu_ref_tryget_live(&ctx->users))
			ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}
/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct kiocb *kiocb, long res, long res2)
{
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common);
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head;
	unsigned long	flags;

	if (kiocb->ki_flags & IOCB_WRITE) {
		struct file *file = kiocb->ki_filp;

		/*
		 * Tell lockdep we inherited freeze protection from submission
		 * thread.
		 */
		if (S_ISREG(file_inode(file)->i_mode))
			__sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
		file_end_write(file);
	}

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	BUG_ON(is_sync_kiocb(kiocb));

	if (iocb->ki_list.next) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * here.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

	/* everything turned out well, dispose of the aiocb. */
	kiocb_free(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	percpu_ref_put(&ctx->reqs);
}
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched.
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ?  tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			    ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}
static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}
static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	ktime_t until = KTIME_MAX;
	long ret = 0;

	if (timeout) {
		struct timespec	ts;

		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			return -EFAULT;

		until = timespec_to_ktime(ts);
	}

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	if (until == 0)
		aio_read_events(ctx, min_nr, nr, event, &ret);
	else
		wait_event_interruptible_hrtimeout(ctx->wait,
				aio_read_events(ctx, min_nr, nr, event, &ret),
				until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}
/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
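/*
 * Userspace usage sketch (illustrative only, not part of this file):
 *
 *	aio_context_t ctx = 0;		// must be initialized to 0
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 *	...
 *	syscall(__NR_io_destroy, ctx);
 */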
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctx32p);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		/* truncating is ok because it's a user address */
		ret = put_user((u32)ioctx->user_id, ctx32p);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
#endif
/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct ctx_rq_wait wait;
		int ret;

		init_completion(&wait.comp);
		atomic_set(&wait.count, 1);

		/* Pass requests_done to kill_ioctx() where it can be set
		 * in a thread-safe way. If we try to set it here then we have
		 * a race condition if two io_destroy() calls are made
		 * simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &wait);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done. Otherwise the
		 * kernel keeps using user-space buffers even if the user
		 * thinks the context is destroyed.
		 */
		if (!ret)
			wait_for_completion(&wait.comp);

		return ret;
	}
	pr_debug("EINVAL: invalid context id\n");
	return -EINVAL;
}
static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
		bool vectored, bool compat, struct iov_iter *iter)
{
	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
	size_t len = iocb->aio_nbytes;

	if (!vectored) {
		ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
		*iovec = NULL;
		return ret;
	}
#ifdef CONFIG_COMPAT
	if (compat)
		return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec,
				iter);
#endif
	return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
}
static inline ssize_t aio_ret(struct kiocb *req, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		return ret;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		aio_complete(req, ret, 0);
		return 0;
	}
}
static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
		bool compat)
{
	struct file *file = req->ki_filp;
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	ssize_t ret;

	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;
	if (unlikely(!file->f_op->read_iter))
		return -EINVAL;

	ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
	if (ret)
		return ret;
	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret)
		ret = aio_ret(req, call_read_iter(file, req, &iter));
	kfree(iovec);
	return ret;
}
static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
		bool compat)
{
	struct file *file = req->ki_filp;
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;
	ssize_t ret;

	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;
	if (unlikely(!file->f_op->write_iter))
		return -EINVAL;

	ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
	if (ret)
		return ret;
	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
	if (!ret) {
		req->ki_flags |= IOCB_WRITE;
		file_start_write(file);
		ret = aio_ret(req, call_write_iter(file, req, &iter));
		/*
		 * We release freeze protection in aio_complete().  Fool lockdep
		 * by telling it the lock got released so that it doesn't
		 * complain about held lock when we return to userspace.
		 */
		if (S_ISREG(file_inode(file)->i_mode))
			__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
	}
	kfree(iovec);
	return ret;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, bool compat)
{
	struct aio_kiocb *req;
	struct file *file;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	req->common.ki_filp = file = fget(iocb->aio_fildes);
	if (unlikely(!req->common.ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}
	req->common.ki_pos = iocb->aio_offset;
	req->common.ki_complete = aio_complete;
	req->common.ki_flags = iocb_flags(req->common.ki_filp);
	req->common.ki_hint = file_write_hint(file);

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}

		req->common.ki_flags |= IOCB_EVENTFD;
	}

	ret = kiocb_set_rw_flags(&req->common, iocb->aio_rw_flags);
	if (unlikely(ret)) {
		pr_debug("EINVAL: aio_rw_flags\n");
		goto out_put_req;
	}

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_user_iocb = user_iocb;
	req->ki_user_data = iocb->aio_data;

	switch (iocb->aio_lio_opcode) {
	case IOCB_CMD_PREAD:
		ret = aio_read(&req->common, iocb, false, compat);
		break;
	case IOCB_CMD_PWRITE:
		ret = aio_write(&req->common, iocb, false, compat);
		break;
	case IOCB_CMD_PREADV:
		ret = aio_read(&req->common, iocb, true, compat);
		break;
	case IOCB_CMD_PWRITEV:
		ret = aio_write(&req->common, iocb, true, compat);
		break;
	default:
		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
		ret = -EINVAL;
		break;
	}

	if (ret && ret != -EIOCBQUEUED)
		goto out_put_req;
	return 0;
out_put_req:
	put_reqs_available(ctx, 1);
	percpu_ref_put(&ctx->reqs);
	kiocb_free(req);
	return ret;
}
static long do_io_submit(aio_context_t ctx_id, long nr,
			  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i=0; i<nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}
/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
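/*
 * Userspace usage sketch (illustrative only, not part of this file; fd, buf,
 * len and ctx are assumed to be set up by the caller):
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	cb.aio_fildes     = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = len;
 *	cb.aio_offset     = 0;
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */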
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}
#ifdef CONFIG_COMPAT
static inline long
copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
{
	compat_uptr_t uptr;
	long i;

	for (i = 0; i < nr; ++i) {
		if (get_user(uptr, ptr32 + i))
			return -EFAULT;
		if (put_user(compat_ptr(uptr), ptr64 + i))
			return -EFAULT;
	}
	return 0;
}

#define MAX_AIO_SUBMITS 	(PAGE_SIZE/sizeof(struct iocb *))

COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
		       int, nr, u32 __user *, iocb)
{
	struct iocb __user * __user *iocb64;
	long ret;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (nr > MAX_AIO_SUBMITS)
		nr = MAX_AIO_SUBMITS;

	iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
	ret = copy_iocb(nr, iocb, iocb64);
	if (!ret)
		ret = do_io_submit(ctx_id, nr, iocb64, 1);
	return ret;
}
#endif
/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct aio_kiocb *
lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key)
{
	struct aio_kiocb *kiocb;

	assert_spin_locked(&ctx->ctx_lock);

	if (key != KIOCB_KEY)
		return NULL;

	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_user_iocb == iocb)
			return kiocb;
	}
	return NULL;
}
/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
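/*
 * Userspace usage sketch (illustrative only, not part of this file; cb is the
 * struct iocb previously passed to io_submit()):
 *
 *	struct io_event ev;
 *	if (syscall(__NR_io_cancel, ctx, &cb, &ev) < 0 && errno == EAGAIN)
 *		;	// the iocb could not be cancelled, per the note above
 */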
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(kiocb);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer. -EINPROGRESS indicates
		 * cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}
/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
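/*
 * Userspace usage sketch (illustrative only, not part of this file):
 *
 *	struct io_event ev;
 *	struct timespec ts = { .tv_sec = 1 };	// relative timeout
 *	int n = syscall(__NR_io_getevents, ctx, 1, 1, &ev, &ts);
 *	if (n == 1)
 *		// ev.data carries iocb->aio_data, ev.res the result
 *		handle_completion(ev.data, ev.res);	// hypothetical helper
 */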
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		percpu_ref_put(&ioctx->users);
	}

	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct compat_timespec __user *, timeout)
{
	struct timespec t;
	struct timespec __user *ut = NULL;

	if (timeout) {
		if (compat_get_timespec(&t, timeout))
			return -EFAULT;

		ut = compat_alloc_user_space(sizeof(*ut));
		if (copy_to_user(ut, &t, sizeof(t)))
			return -EFAULT;
	}
	return sys_io_getevents(ctx_id, min_nr, nr, events, ut);
}
#endif