 * An async IO implementation for Linux
 * Written by Benjamin LaHaise <bcrl@kvack.org>
 * Implements an efficient asynchronous io interface.
 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
 * Copyright 2018 Christoph Hellwig.
 * See ../COPYING for licensing terms.
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <linux/uaccess.h>

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[0];
}; /* 128 bytes + ring size */
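/*
 * Illustrative note (not from the original source): this ring header is
 * mapped read/write into the submitting process by aio_setup_ring() below,
 * and the io_setup() handle is the userspace address of that mapping.  A
 * rough sketch of how a userspace consumer could reap one event straight
 * from the shared ring -- omitting the memory barriers and feature checks
 * real code needs:
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_handle;
 *	unsigned head = ring->head;
 *	if (head != ring->tail) {
 *		struct io_event ev = ring->io_events[head];
 *		ring->head = (head + 1) % ring->nr;
 *	}
 *
 * The kernel-side equivalent of this loop is aio_read_events_ring() below.
 */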
#define AIO_RING_PAGES	8

	struct kioctx __rcu	*table[];

	unsigned		reqs_available;

	struct completion comp;

	struct percpu_ref	users;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	 * For percpu reqs_available, number of slots we move to/from global

	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 * The real limit is nr_events - 1, which will be larger (see

	/* Size of ringbuffer, in units of struct io_event */

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;

	struct rcu_work		free_rwork;	/* see free_ioctx() */

	 * signals when all in-flight requests are done
	struct ctx_rq_wait	*rq_wait;

	 * This counts the number of available slots in the ringbuffer,
	 * so we avoid overflowing it: it's decremented (if positive)
	 * when allocating a kiocb and incremented when the resulting
	 * io_event is pulled off the ringbuffer.
	 * We batch accesses to it with a percpu version.
	atomic_t	reqs_available;
} ____cacheline_aligned_in_smp;

	struct list_head	active_reqs;	/* used for cancellation */
} ____cacheline_aligned_in_smp;

	struct mutex	ring_lock;
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

	unsigned		completed_events;
	spinlock_t		completion_lock;
} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	struct work_struct	work;

	struct wait_queue_head	*head;
	struct wait_queue_entry	wait;
	struct work_struct	work;

	struct fsync_iocb	fsync;
	struct poll_iocb	poll;

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
	__u64			ki_user_data;	/* user's data for completion */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	struct eventfd_ctx	*ki_eventfd;

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
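/*
 * Note (not in the original source): aio_nr and aio_max_nr are normally
 * exposed to userspace as /proc/sys/fs/aio-nr and /proc/sys/fs/aio-max-nr,
 * so the system-wide request quota enforced in ioctx_alloc() below can be
 * inspected and raised at runtime.
 */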
static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
	struct qstr this = QSTR_INIT("[aio]", 5);
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
		return ERR_PTR(-ENOMEM);

	path.mnt = mntget(aio_mnt);

	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
	file->f_flags = O_RDWR;

static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
	static const struct dentry_operations ops = {
		.d_dname	= simple_dname,
	struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
	root->d_sb->s_iflags |= SB_I_NOEXEC;
 * Creates the slab caches used by the aio routines, panic on
 * failure as this is done early during the boot sequence.
static int __init aio_setup(void)
	static struct file_system_type aio_fs = {
		.kill_sb	= kill_anon_super,
	aio_mnt = kern_mount(&aio_fs);
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	truncate_setsize(file_inode(aio_ring_file), 0);

	/* Prevent further access to the kioctx from migratepages */
	i_mapping = aio_ring_file->f_mapping;
	spin_lock(&i_mapping->private_lock);
	i_mapping->private_data = NULL;
	ctx->aio_ring_file = NULL;
	spin_unlock(&i_mapping->private_lock);
static void aio_free_ring(struct kioctx *ctx)
	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		ctx->ring_pages[i] = NULL;

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
static int aio_ring_mremap(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference(mm->ioctx_table);
	for (i = 0; i < table->nr; i++) {
		ctx = rcu_dereference(table->table[i]);
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
	spin_unlock(&mm->ioctx_lock);

static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_ops = &aio_ring_vm_ops;

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			   struct page *old, enum migrate_mode mode)
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the ctx->completion_lock. That does not work with the
	 * migration workflow of MIGRATE_SYNC_NO_COPY.
	if (mode == MIGRATE_SYNC_NO_COPY)

	/* mapping->private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	if (!mutex_trylock(&ctx->ring_lock)) {

	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old page hasn't already been changed */
		if (ctx->ring_pages[idx] != old)

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old page is copied to the new. This prevents new
	 * events from being lost.
	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	BUG_ON(ctx->ring_pages[idx] != old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old page is no longer accessible. */

	mutex_unlock(&ctx->ring_lock);
	spin_unlock(&mapping->private_lock);
static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty = __set_page_dirty_no_writeback,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,

static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
	struct aio_ring *ring;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);

	file = aio_private_file(ctx, nr_pages);
		ctx->aio_ring_file = NULL;

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);

	for (i = 0; i < nr_pages; i++) {
		page = find_or_create_page(file->f_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		ctx->ring_pages[i] = page;

	if (unlikely(i != nr_pages)) {

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (down_write_killable(&mm->mmap_sem)) {
	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused, NULL);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	flush_dcache_page(ctx->ring_pages[0]);
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
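/*
 * Illustrative note (not from the original source): the first ring page holds
 * the aio_ring header followed by AIO_EVENTS_FIRST_PAGE events, while every
 * later page holds AIO_EVENTS_PER_PAGE events.  AIO_EVENTS_OFFSET is the
 * difference, so adding it to an event index yields a position that splits
 * into a page and slot the same way aio_complete() and aio_read_events_ring()
 * below do it:
 *
 *	pos  = idx + AIO_EVENTS_OFFSET;
 *	page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
 *	slot = pos % AIO_EVENTS_PER_PAGE;
 *
 * With 4 KiB pages and the 32-byte struct io_event this works out to 128
 * events per page (slightly fewer in page 0, assuming the header fits in a
 * single io_event slot).
 */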
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
	struct kioctx *ctx = req->ki_ctx;

	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_add_tail(&req->ki_list, &ctx->active_reqs);
	req->ki_cancel = cancel;
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
EXPORT_SYMBOL(kiocb_set_cancel_fn);
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
static void free_ioctx(struct work_struct *work)
	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
	pr_debug("freeing %p\n", ctx);

	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);

static void free_ioctx_reqs(struct percpu_ref *ref)
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
	queue_rcu_work(system_wq, &ctx->free_rwork);
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
static void free_ioctx_users(struct percpu_ref *ref)
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);
		req->ki_cancel(&req->rw);
		list_del_init(&req->ki_list);

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	for (i = 0; i < table->nr; i++)
		if (!rcu_access_pointer(table->table[i])) {
			rcu_assign_pointer(table->table[i], ctx);
			spin_unlock(&mm->ioctx_lock);

			/* While kioctx setup is in progress,
			 * we are protected from page migration
			 * changing ring_pages by ->ring_lock.
			ring = kmap_atomic(ctx->ring_pages[0]);

	new_nr = (table ? table->nr : 1) * 4;
	spin_unlock(&mm->ioctx_lock);

	table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *

	spin_lock(&mm->ioctx_lock);
	old = rcu_dereference_raw(mm->ioctx_table);

		rcu_assign_pointer(mm->ioctx_table, table);
	} else if (table->nr > old->nr) {
		memcpy(table->table, old->table,
		       old->nr * sizeof(struct kioctx *));

		rcu_assign_pointer(mm->ioctx_table, table);
static void aio_nr_sub(unsigned nr)
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
	spin_unlock(&aio_nr_lock);
 * Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
static struct kioctx *ioctx_alloc(unsigned nr_events)
	struct mm_struct *mm = current->mm;

	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	unsigned int max_reqs = nr_events;

	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 * So since up to half the slots might be on other CPUs' percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	nr_events = max(nr_events, num_possible_cpus() * 4);

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);

	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = max_reqs;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))

	ctx->cpu = alloc_percpu(struct kioctx_cpu);

	err = aio_setup_ring(ctx, nr_events);

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);

	aio_nr_sub(ctx->max_reqs);
	atomic_set(&ctx->dead, 1);
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
 * Cancels all outstanding aio requests on an aio context.  Used
 * when the processes owning a context have all exited to encourage
 * the rapid destruction of the kioctx.
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	aio_nr_sub(ctx->max_reqs);

		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	percpu_ref_kill(&ctx->users);
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
void exit_aio(struct mm_struct *mm)
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	for (i = 0; i < table->nr; ++i) {
			rcu_dereference_protected(table->table[i], true);

		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		kill_ioctx(mm, ctx, &wait);

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
		wait_for_completion(&wait.comp);

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
	struct kioctx_cpu *kcpu;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);

	local_irq_restore(flags);

static bool get_reqs_available(struct kioctx *ctx)
	struct kioctx_cpu *kcpu;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

			if (avail < ctx->req_batch)

			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;

	kcpu->reqs_available--;
	local_irq_restore(flags);
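/*
 * Illustrative note (not from the original source): each CPU keeps a small
 * private pool of completion-ring slots in ctx->cpu->reqs_available and only
 * touches the shared atomic in batches of ctx->req_batch.  For example, with
 * req_batch == 32, a CPU whose private pool runs dry in get_reqs_available()
 * pulls 32 slots from ctx->reqs_available with a single cmpxchg, and
 * put_reqs_available() pushes a batch of 32 back once a CPU has accumulated
 * 2 * req_batch slots, keeping contention on the global counter low.
 */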
/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
		events_in_ring = tail - head;
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() runs out of space in
 *	the completion ring.
static void user_refill_reqs_available(struct kioctx *ctx)
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay since whether we read the old version
		 * or the new version, either will be valid.  The important
		 * part is that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		ring = kmap_atomic(ctx->ring_pages[0]);
		kunmap_atomic(ring);

		refill_reqs_available(ctx, head, ctx->tail);

	spin_unlock_irq(&ctx->completion_lock);
 * Allocate a slot for an aio request.
 * Returns NULL if no requests are free.
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
	struct aio_kiocb *req;

	if (!get_reqs_available(ctx)) {
		user_refill_reqs_available(ctx);
		if (!get_reqs_available(ctx))

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);

	percpu_ref_get(&ctx->reqs);
	INIT_LIST_HEAD(&req->ki_list);

	put_reqs_available(ctx, 1);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
	struct aio_ring __user *ring  = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;

	if (get_user(id, &ring->id))

	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)

	ctx = rcu_dereference(table->table[id]);
	if (ctx && ctx->user_id == ctx_id) {
		if (percpu_ref_tryget_live(&ctx->users))

 * Called when the io request on the given iocb is complete.
static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head;
	unsigned long	flags;

	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	spin_lock_irqsave(&ctx->completion_lock, flags);

	pos = tail + AIO_EVENTS_OFFSET;
	if (++tail >= ctx->nr_events)

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
	event->data = iocb->ki_user_data;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,

	/* after flagging the request as done, we
	 * must never even look at it again
	smp_wmb();	/* make event visible before updating tail */

	ring = kmap_atomic(ctx->ring_pages[0]);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	if (iocb->ki_eventfd) {
		eventfd_signal(iocb->ki_eventfd, 1);
		eventfd_ctx_put(iocb->ki_eventfd);

	kmem_cache_free(kiocb_cachep, iocb);

	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	percpu_ref_put(&ctx->reqs);
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
	struct aio_ring *ring;
	unsigned head, tail, pos;

	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = kmap_atomic(ctx->ring_pages[0]);
	kunmap_atomic(ring);

	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

		struct io_event *ev;

		avail = (head <= tail ?  tail : ctx->nr_events) - head;

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);

		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);

		if (unlikely(copy_ret)) {

	head %= ctx->nr_events;

	ring = kmap_atomic(ctx->ring_pages[0]);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);

	mutex_unlock(&ctx->ring_lock);
static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (unlikely(atomic_read(&ctx->dead)))

	return ret < 0 || *i >= min_nr;

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
		aio_read_events(ctx, min_nr, nr, event, &ret);
		wait_event_interruptible_hrtimeout(ctx->wait,
				aio_read_events(ctx, min_nr, nr, event, &ret),

 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
	struct kioctx *ioctx = NULL;

	ret = get_user(ctx, ctxp);

	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
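/*
 * Illustrative userspace sketch (not part of this file): the handle written
 * to *ctxp is the ctx_id taken by the other io_* syscalls.  Using the raw
 * syscall numbers from <sys/syscall.h> together with <linux/aio_abi.h>:
 *
 *	aio_context_t ctx = 0;
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");	// e.g. EAGAIN once the aio-max-nr quota is hit
 *	...
 *	syscall(__NR_io_destroy, ctx);
 */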
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
	struct kioctx *ioctx = NULL;

	ret = get_user(ctx, ctx32p);

	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		/* truncating is ok because it's a user address */
		ret = put_user((u32)ioctx->user_id, ctx32p);
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct ctx_rq_wait wait;

		init_completion(&wait.comp);
		atomic_set(&wait.count, 1);

		/* Pass requests_done to kill_ioctx() where it can be set
		 * in a thread-safe way.  If we tried to set it here we would
		 * have a race condition if two io_destroy() calls ran
		 * simultaneously.
		ret = kill_ioctx(current->mm, ioctx, &wait);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done.  Otherwise the
		 * kernel keeps using user-space buffers even if the user
		 * thinks the context
		wait_for_completion(&wait.comp);

	pr_debug("EINVAL: invalid context id\n");
static void aio_remove_iocb(struct aio_kiocb *iocb)
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_del(&iocb->ki_list);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);

static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);

	if (!list_empty_careful(&iocb->ki_list))
		aio_remove_iocb(iocb);

	if (kiocb->ki_flags & IOCB_WRITE) {
		struct inode *inode = file_inode(kiocb->ki_filp);

		 * Tell lockdep we inherited freeze protection from submission
		if (S_ISREG(inode->i_mode))
			__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
		file_end_write(kiocb->ki_filp);

	fput(kiocb->ki_filp);
	aio_complete(iocb, res, res2);

static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp))
	req->ki_complete = aio_complete_rw;
	req->ki_pos = iocb->aio_offset;
	req->ki_flags = iocb_flags(req->ki_filp);
	if (iocb->aio_flags & IOCB_FLAG_RESFD)
		req->ki_flags |= IOCB_EVENTFD;
	req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
		 * aio_reqprio is interpreted as an I/O scheduling
		 * class and priority.
		ret = ioprio_check_cap(iocb->aio_reqprio);
			pr_debug("aio ioprio check cap error: %d\n", ret);

		req->ki_ioprio = iocb->aio_reqprio;
		req->ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);

	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);

static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
		bool vectored, bool compat, struct iov_iter *iter)
	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
	size_t len = iocb->aio_nbytes;

		ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
#ifdef CONFIG_COMPAT
		return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec,
	return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		 * There's no easy way to restart the syscall since other AIOs
		 * may already be running. Just fail this IO with EINTR.
		aio_complete_rw(req, ret, 0);
static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;

	ret = aio_prep_rw(req, iocb);
	file = req->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_READ)))
	if (unlikely(!file->f_op->read_iter))
	ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
		aio_rw_done(req, call_read_iter(file, req, &iter));

static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;

	ret = aio_prep_rw(req, iocb);
	file = req->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_WRITE)))
	if (unlikely(!file->f_op->write_iter))
	ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * aio_complete_rw().  Fool lockdep by telling it the lock got
		 * released so that it doesn't complain about the held lock when
		 * we return to userspace.
		if (S_ISREG(file_inode(file)->i_mode)) {
			__sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
			__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
		req->ki_flags |= IOCB_WRITE;
		aio_rw_done(req, call_write_iter(file, req, &iter));

static void aio_fsync_work(struct work_struct *work)
	struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);

	ret = vfs_fsync(req->file, req->datasync);
	aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);

static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
			iocb->aio_rw_flags))

	req->file = fget(iocb->aio_fildes);
	if (unlikely(!req->file))
	if (unlikely(!req->file->f_op->fsync)) {

	req->datasync = datasync;
	INIT_WORK(&req->work, aio_fsync_work);
	schedule_work(&req->work);

/* need to use list_del_init so we can check if item was present */
static inline bool __aio_poll_remove(struct poll_iocb *req)
	if (list_empty(&req->wait.entry))
	list_del_init(&req->wait.entry);

static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
	fput(iocb->poll.file);
	aio_complete(iocb, mangle_poll(mask), 0);

static void aio_poll_work(struct work_struct *work)
	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);

	if (!list_empty_careful(&iocb->ki_list))
		aio_remove_iocb(iocb);
	__aio_poll_complete(iocb, iocb->poll.events);

static int aio_poll_cancel(struct kiocb *iocb)
	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
	struct poll_iocb *req = &aiocb->poll;
	struct wait_queue_head *head = req->head;

	spin_lock(&head->lock);
	found = __aio_poll_remove(req);
	spin_unlock(&head->lock);

		INIT_WORK(&req->work, aio_poll_work);
		schedule_work(&req->work);

static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	struct file *file = req->file;
	__poll_t mask = key_to_poll(key);

	assert_spin_locked(&req->head->lock);

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & req->events))

	mask = file->f_op->poll_mask(file, req->events) & req->events;

	__aio_poll_remove(req);

	 * Try completing without a context switch if we can acquire ctx_lock
	 * without spinning. Otherwise we need to defer to a workqueue to
	 * avoid a deadlock due to the lock order.
	if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
		list_del_init(&iocb->ki_list);
		spin_unlock(&iocb->ki_ctx->ctx_lock);

		__aio_poll_complete(iocb, mask);
		INIT_WORK(&req->work, aio_poll_work);
		schedule_work(&req->work);

static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
	struct kioctx *ctx = aiocb->ki_ctx;
	struct poll_iocb *req = &aiocb->poll;

	/* reject any unknown events outside the normal event mask. */
	if ((u16)iocb->aio_buf != iocb->aio_buf)

	/* reject fields that are not defined for poll */
	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)

	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
	req->file = fget(iocb->aio_fildes);
	if (unlikely(!req->file))
	if (!file_has_poll_mask(req->file))

	req->head = req->file->f_op->get_poll_head(req->file, req->events);
	if (IS_ERR(req->head)) {

	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
	aiocb->ki_cancel = aio_poll_cancel;

	spin_lock_irq(&ctx->ctx_lock);
	spin_lock(&req->head->lock);
	mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
		__add_wait_queue(req->head, &req->wait);
		list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
	spin_unlock(&req->head->lock);
	spin_unlock_irq(&ctx->ctx_lock);

		__aio_poll_complete(aiocb, mask);

	return -EINVAL; /* same as no support for IOCB_CMD_POLL */
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
	struct aio_kiocb *req;

	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))

	/* enforce forwards compatibility on users */
	if (unlikely(iocb.aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");

	/* prevent overflows */
	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
	    ((ssize_t)iocb.aio_nbytes < 0)
		pr_debug("EINVAL: overflow check\n");

	req = aio_get_req(ctx);

	if (iocb.aio_flags & IOCB_FLAG_RESFD) {
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb.aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");

	req->ki_user_iocb = user_iocb;
	req->ki_user_data = iocb.aio_data;

	switch (iocb.aio_lio_opcode) {
	case IOCB_CMD_PREAD:
		ret = aio_read(&req->rw, &iocb, false, compat);
	case IOCB_CMD_PWRITE:
		ret = aio_write(&req->rw, &iocb, false, compat);
	case IOCB_CMD_PREADV:
		ret = aio_read(&req->rw, &iocb, true, compat);
	case IOCB_CMD_PWRITEV:
		ret = aio_write(&req->rw, &iocb, true, compat);
	case IOCB_CMD_FSYNC:
		ret = aio_fsync(&req->fsync, &iocb, false);
	case IOCB_CMD_FDSYNC:
		ret = aio_fsync(&req->fsync, &iocb, true);
		ret = aio_poll(req, &iocb);
		pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);

	 * If ret is 0, we've either done aio_complete() ourselves or have
	 * arranged for that to be done asynchronously.  Anything non-zero
	 * means that we need to destroy req ourselves.
	put_reqs_available(ctx, 1);
	percpu_ref_put(&ctx->reqs);
	if (req->ki_eventfd)
		eventfd_ctx_put(req->ki_eventfd);
	kmem_cache_free(kiocb_cachep, req);
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
	struct blk_plug plug;

	if (unlikely(nr < 0))

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {

		ret = io_submit_one(ctx, user_iocb, false);
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
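/*
 * Illustrative userspace sketch (not part of this file): each element of
 * iocbpp is a pointer to a struct iocb describing one operation, e.g. a
 * single 4 KiB pread from fd at offset 0:
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes	  = fd;
 *	cb.aio_buf	  = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes	  = 4096;
 *	cb.aio_offset	  = 0;
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 *
 * The completion shows up later as a struct io_event whose obj field holds
 * the userspace address of &cb (see aio_complete() above).
 */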
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
		       int, nr, compat_uptr_t __user *, iocbpp)
	struct blk_plug plug;

	if (unlikely(nr < 0))

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		compat_uptr_t user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {

		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);

 * Finds a given iocb for cancellation.
static struct aio_kiocb *
lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
	struct aio_kiocb *kiocb;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_user_iocb == iocb)
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
	struct aio_kiocb *kiocb;

	if (unlikely(get_user(key, &iocb->aio_key)))
	if (unlikely(key != KIOCB_KEY))

	ctx = lookup_ioctx(ctx_id);

	spin_lock_irq(&ctx->ctx_lock);
	kiocb = lookup_kiocb(ctx, iocb);
		ret = kiocb->ki_cancel(&kiocb->rw);
		list_del_init(&kiocb->ki_list);
	spin_unlock_irq(&ctx->ctx_lock);

	 * The result argument is no longer used - the io_event is
	 * always delivered via the ring buffer.  -EINPROGRESS indicates
	 * that cancellation is in progress:
	percpu_ref_put(&ctx->users);
static long do_io_getevents(aio_context_t ctx_id,
		struct io_event __user *events,
		struct timespec64 *ts)
	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	struct kioctx *ioctx = lookup_ioctx(ctx_id);

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, until);
		percpu_ref_put(&ioctx->users);

 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
	struct timespec64	ts;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	if (!ret && signal_pending(current))
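/*
 * Illustrative userspace sketch (not part of this file): reaping completions
 * for requests queued with io_submit() above, blocking for at most a second:
 *
 *	struct io_event ev[32];
 *	struct timespec to = { .tv_sec = 1, .tv_nsec = 0 };
 *	long n = syscall(__NR_io_getevents, ctx, 1, 32, ev, &to);
 *	for (long i = 0; i < n; i++)
 *		handle((struct iocb *)(unsigned long)ev[i].obj, ev[i].res);
 *
 * where handle() is a hypothetical callback; ev[i].res carries the usual
 * byte count or negative errno for the corresponding iocb.
 */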
SYSCALL_DEFINE6(io_pgetevents,
		aio_context_t, ctx_id,
		struct io_event __user *, events,
		struct timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
	struct __aio_sigset	ksig = { NULL, };
	sigset_t		ksigmask, sigsaved;
	struct timespec64	ts;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))

		if (ksig.sigsetsize != sizeof(sigset_t))
		if (copy_from_user(&ksigmask, ksig.sigmask, sizeof(ksigmask)))
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	if (signal_pending(current)) {
			current->saved_sigmask = sigsaved;
			set_restore_sigmask();

			ret = -ERESTARTNOHAND;
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       struct io_event __user *, events,
		       struct compat_timespec __user *, timeout)
	struct timespec64 t;

	if (timeout && compat_get_timespec64(&t, timeout))

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	if (!ret && signal_pending(current))

struct __compat_aio_sigset {
	compat_sigset_t __user	*sigmask;
	compat_size_t		sigsetsize;

COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		compat_aio_context_t, ctx_id,
		compat_long_t, min_nr,
		struct io_event __user *, events,
		struct compat_timespec __user *, timeout,
		const struct __compat_aio_sigset __user *, usig)
	struct __compat_aio_sigset ksig = { NULL, };
	sigset_t ksigmask, sigsaved;
	struct timespec64 t;

	if (timeout && compat_get_timespec64(&t, timeout))

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))

		if (ksig.sigsetsize != sizeof(compat_sigset_t))
		if (get_compat_sigset(&ksigmask, ksig.sigmask))
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	if (signal_pending(current)) {
			current->saved_sigmask = sigsaved;
			set_restore_sigmask();

			ret = -ERESTARTNOHAND;
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);