 * An async IO implementation for Linux
 * Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 * Implements an efficient asynchronous io interface.
 *
 * Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 * Copyright 2018 Christoph Hellwig.
 *
 * See ../COPYING for licensing terms.

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/sched/signal.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */
	struct io_event	io_events[];
}; /* 128 bytes + ring size */
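/*
 * Editor's illustrative sketch (not part of the original file): the ring
 * header above is mapped into userspace, so a libaio-style reaper can in
 * principle consume events without entering the kernel, provided
 * incompat_features == 0. This sketch assumes the full struct aio_ring
 * layout (including the tail field not shown in this excerpt) and that
 * "ctx" is the aio_context_t returned by io_setup(); consume() is a
 * hypothetical helper.
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx;
 *	if (ring->magic == AIO_RING_MAGIC && ring->incompat_features == 0) {
 *		unsigned head = ring->head, tail = ring->tail;
 *		while (head != tail) {
 *			consume(&ring->io_events[head]);
 *			head = (head + 1) % ring->nr;
 *		}
 *		// a memory barrier is needed before publishing the new head
 *		ring->head = head;
 *	}
 */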
/*
 * Plugging is meant to work with larger batches of IOs. If we don't
 * have more than the below, then don't bother setting up a plug.
 */
#define AIO_PLUG_THRESHOLD	2

#define AIO_RING_PAGES	8

	struct kioctx __rcu	*table[] __counted_by(nr);

	unsigned		reqs_available;

	struct completion	comp;

	struct percpu_ref	users;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct kioctx_cpu __percpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 */

	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 */

	/* Size of ringbuffer, in units of struct io_event */

	unsigned long	mmap_base;
	unsigned long	mmap_size;

	struct folio	**ring_folios;

	struct rcu_work	free_rwork;	/* see free_ioctx() */

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	/*
	 * This counts the number of available slots in the ringbuffer,
	 * so we avoid overflowing it: it's decremented (if positive)
	 * when allocating a kiocb and incremented when the resulting
	 * io_event is pulled off the ringbuffer.
	 *
	 * We batch accesses to it with a percpu version.
	 */
	atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct list_head	active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct mutex	ring_lock;
	wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	unsigned	completed_events;
	spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct folio	*internal_folios[AIO_RING_PAGES];
	struct file	*aio_ring_file;
/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
	struct work_struct	work;

	struct wait_queue_head	*head;
	bool			work_need_resched;
	struct wait_queue_entry	wait;
	struct work_struct	work;

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
		struct file		*ki_filp;
		struct fsync_iocb	fsync;
		struct poll_iocb	poll;

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct io_event		ki_res;

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	refcount_t		ki_refcnt;

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
static unsigned long aio_nr;		/* current system wide number of aio requests */
static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
static struct ctl_table aio_sysctls[] = {
		.procname	= "aio-nr",
		.maxlen		= sizeof(aio_nr),
		.proc_handler	= proc_doulongvec_minmax,
		.procname	= "aio-max-nr",
		.maxlen		= sizeof(aio_max_nr),
		.proc_handler	= proc_doulongvec_minmax,

static void __init aio_sysctl_init(void)
	register_sysctl_init("fs", aio_sysctls);
#define aio_sysctl_init() do { } while (0)

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->i_private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
				O_RDWR, &aio_ring_fops);

static int aio_init_fs_context(struct fs_context *fc)
	if (!init_pseudo(fc, AIO_RING_MAGIC))
	fc->s_iflags |= SB_I_NOEXEC;

/*
 * Creates the slab caches used by the aio routines, panic on
 * failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
	static struct file_system_type aio_fs = {
		.init_fs_context = aio_init_fs_context,
		.kill_sb	= kill_anon_super,
	aio_mnt = kern_mount(&aio_fs);
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
__initcall(aio_setup);
static void put_aio_ring_file(struct kioctx *ctx)
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	truncate_setsize(file_inode(aio_ring_file), 0);

	/* Prevent further access to the kioctx from migratepages */
	i_mapping = aio_ring_file->f_mapping;
	spin_lock(&i_mapping->i_private_lock);
	i_mapping->i_private_data = NULL;
	ctx->aio_ring_file = NULL;
	spin_unlock(&i_mapping->i_private_lock);

static void aio_free_ring(struct kioctx *ctx)
	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct folio *folio = ctx->ring_folios[i];

		pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
			 folio_ref_count(folio));
		ctx->ring_folios[i] = NULL;

	if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
		kfree(ctx->ring_folios);
		ctx->ring_folios = NULL;
static int aio_ring_mremap(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference(mm->ioctx_table);

	for (i = 0; i < table->nr; i++) {
		ctx = rcu_dereference(table->table[i]);
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;

	spin_unlock(&mm->ioctx_lock);

static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
	vm_flags_set(vma, VM_DONTEXPAND);
	vma->vm_ops = &aio_ring_vm_ops;

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
			struct folio *src, enum migrate_mode mode)
	/* mapping->i_private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->i_private_lock);
	ctx = mapping->i_private_data;

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {

	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old folio hasn't already been changed */
		if (ctx->ring_folios[idx] != src)

	/* Writeback must be complete */
	BUG_ON(folio_test_writeback(src));

	rc = folio_migrate_mapping(mapping, dst, src, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old folio is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	folio_copy(dst, src);
	folio_migrate_flags(dst, src);
	BUG_ON(ctx->ring_folios[idx] != src);
	ctx->ring_folios[idx] = dst;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old folio is no longer accessible. */

	mutex_unlock(&ctx->ring_lock);
	spin_unlock(&mapping->i_private_lock);
#define aio_migrate_folio NULL

static const struct address_space_operations aio_ctx_aops = {
	.dirty_folio	= noop_dirty_folio,
	.migrate_folio	= aio_migrate_folio,
static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
	struct aio_ring *ring;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);

	file = aio_private_file(ctx, nr_pages);
		ctx->aio_ring_file = NULL;

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_folios = ctx->internal_folios;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *),
		if (!ctx->ring_folios) {
			put_aio_ring_file(ctx);

	for (i = 0; i < nr_pages; i++) {
		folio = __filemap_get_folio(file->f_mapping, i,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    GFP_USER | __GFP_ZERO);

		pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
			 folio_ref_count(folio));
		folio_end_read(folio, true);

		ctx->ring_folios[i] = folio;

	if (unlikely(i != nr_pages)) {

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (mmap_write_lock_killable(mm)) {
	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
				 PROT_READ | PROT_WRITE,
				 MAP_SHARED, 0, 0, &unused, NULL);
	mmap_write_unlock(mm);
	if (IS_ERR((void *)ctx->mmap_base)) {

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = folio_address(ctx->ring_folios[0]);
	ring->nr = nr_events;	/* user copy */
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	flush_dcache_folio(ctx->ring_folios[0]);

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
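/*
 * Editor's note (illustrative, not part of the original file): the three
 * macros above convert an event index into a ring folio plus an offset
 * within it. Adding AIO_EVENTS_OFFSET accounts for the struct aio_ring
 * header occupying the start of folio 0, e.g.:
 *
 *	unsigned pos = tail + AIO_EVENTS_OFFSET;
 *	struct io_event *ev_page =
 *		folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
 *	struct io_event *event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 *
 * which is the calculation aio_complete() performs below.
 */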
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
	struct aio_kiocb *req;

	/*
	 * kiocb didn't come from aio or is neither a read nor a write, hence
	 */
	if (!(iocb->ki_flags & IOCB_AIO_RW))

	req = container_of(iocb, struct aio_kiocb, rw);

	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_add_tail(&req->ki_list, &ctx->active_reqs);
	req->ki_cancel = cancel;
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
EXPORT_SYMBOL(kiocb_set_cancel_fn);
/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
 */
static void free_ioctx(struct work_struct *work)
	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
	pr_debug("freeing %p\n", ctx);

	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);

static void free_ioctx_reqs(struct percpu_ref *ref)
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
	queue_rcu_work(system_wq, &ctx->free_rwork);

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);
		req->ki_cancel(&req->rw);
		list_del_init(&req->ki_list);

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	for (i = 0; i < table->nr; i++)
		if (!rcu_access_pointer(table->table[i])) {
			rcu_assign_pointer(table->table[i], ctx);
			spin_unlock(&mm->ioctx_lock);

			/* While kioctx setup is in progress,
			 * we are protected from page migration
			 * changing ring_folios by ->ring_lock.
			 */
			ring = folio_address(ctx->ring_folios[0]);

	new_nr = (table ? table->nr : 1) * 4;
	spin_unlock(&mm->ioctx_lock);

	table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);

	spin_lock(&mm->ioctx_lock);
	old = rcu_dereference_raw(mm->ioctx_table);

		rcu_assign_pointer(mm->ioctx_table, table);
	} else if (table->nr > old->nr) {
		memcpy(table->table, old->table,
		       old->nr * sizeof(struct kioctx *));

		rcu_assign_pointer(mm->ioctx_table, table);

static void aio_nr_sub(unsigned nr)
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
	spin_unlock(&aio_nr_lock);
/*
 * Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
	struct mm_struct *mm = current->mm;

	/*
	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	 */
	unsigned int max_reqs = nr_events;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);

	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = max_reqs;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))

	ctx->cpu = alloc_percpu(struct kioctx_cpu);

	err = aio_setup_ring(ctx, nr_events);

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);

	aio_nr_sub(ctx->max_reqs);
	atomic_set(&ctx->dead, 1);
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
/*
 * Cancels all outstanding aio requests on an aio context.  Used
 * when the processes owning a context have all exited to encourage
 * the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 */
	aio_nr_sub(ctx->max_reqs);

		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	percpu_ref_kill(&ctx->users);
/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 */
void exit_aio(struct mm_struct *mm)
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	for (i = 0; i < table->nr; ++i) {
			rcu_dereference_protected(table->table[i], true);

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		kill_ioctx(mm, ctx, &wait);

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
		wait_for_completion(&wait.comp);

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
	struct kioctx_cpu *kcpu;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);

	local_irq_restore(flags);

static bool __get_reqs_available(struct kioctx *ctx)
	struct kioctx_cpu *kcpu;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int avail = atomic_read(&ctx->reqs_available);

			if (avail < ctx->req_batch)
		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
					     &avail, avail - ctx->req_batch));

		kcpu->reqs_available += ctx->req_batch;

	kcpu->reqs_available--;
	local_irq_restore(flags);
/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
		events_in_ring = tail - head;
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);

/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() encounters an
 *	out-of-space condition in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay since whether we read the old version
		 * or the new version, either will be valid.  The important
		 * part is that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 */
		ring = folio_address(ctx->ring_folios[0]);

		refill_reqs_available(ctx, head, ctx->tail);

	spin_unlock_irq(&ctx->completion_lock);

static bool get_reqs_available(struct kioctx *ctx)
	if (__get_reqs_available(ctx))
	user_refill_reqs_available(ctx);
	return __get_reqs_available(ctx);
/*
 * Allocate a slot for an aio request.
 * Returns NULL if no requests are free.
 *
 * The refcount is initialized to 2 - one for the async op completion,
 * one for the synchronous code that does this.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
	struct aio_kiocb *req;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);

	if (unlikely(!get_reqs_available(ctx))) {
		kmem_cache_free(kiocb_cachep, req);

	percpu_ref_get(&ctx->reqs);
	INIT_LIST_HEAD(&req->ki_list);
	refcount_set(&req->ki_refcnt, 2);
	req->ki_eventfd = NULL;

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
	struct aio_ring __user *ring = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;

	if (get_user(id, &ring->id))

	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)

	id = array_index_nospec(id, table->nr);
	ctx = rcu_dereference(table->table[id]);
	if (ctx && ctx->user_id == ctx_id) {
		if (percpu_ref_tryget_live(&ctx->users))

static inline void iocb_destroy(struct aio_kiocb *iocb)
	if (iocb->ki_eventfd)
		eventfd_ctx_put(iocb->ki_eventfd);
	fput(iocb->ki_filp);
	percpu_ref_put(&iocb->ki_ctx->reqs);
	kmem_cache_free(kiocb_cachep, iocb);

	struct wait_queue_entry	w;
;
1119 * Called when the io request on the given iocb is complete.
1121 static void aio_complete(struct aio_kiocb
*iocb
)
1123 struct kioctx
*ctx
= iocb
->ki_ctx
;
1124 struct aio_ring
*ring
;
1125 struct io_event
*ev_page
, *event
;
1126 unsigned tail
, pos
, head
, avail
;
1127 unsigned long flags
;
1130 * Add a completion event to the ring buffer. Must be done holding
1131 * ctx->completion_lock to prevent other code from messing with the tail
1132 * pointer since we might be called from irq context.
1134 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
1137 pos
= tail
+ AIO_EVENTS_OFFSET
;
1139 if (++tail
>= ctx
->nr_events
)
1142 ev_page
= folio_address(ctx
->ring_folios
[pos
/ AIO_EVENTS_PER_PAGE
]);
1143 event
= ev_page
+ pos
% AIO_EVENTS_PER_PAGE
;
1145 *event
= iocb
->ki_res
;
1147 flush_dcache_folio(ctx
->ring_folios
[pos
/ AIO_EVENTS_PER_PAGE
]);
1149 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx
, tail
, iocb
,
1150 (void __user
*)(unsigned long)iocb
->ki_res
.obj
,
1151 iocb
->ki_res
.data
, iocb
->ki_res
.res
, iocb
->ki_res
.res2
);
1153 /* after flagging the request as done, we
1154 * must never even look at it again
1156 smp_wmb(); /* make event visible before updating tail */
1160 ring
= folio_address(ctx
->ring_folios
[0]);
1163 flush_dcache_folio(ctx
->ring_folios
[0]);
1165 ctx
->completed_events
++;
1166 if (ctx
->completed_events
> 1)
1167 refill_reqs_available(ctx
, head
, tail
);
1171 : tail
+ ctx
->nr_events
- head
;
1172 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
1174 pr_debug("added to ring %p at [%u]\n", iocb
, tail
);
1177 * Check if the user asked us to deliver the result through an
1178 * eventfd. The eventfd_signal() function is safe to be called
1181 if (iocb
->ki_eventfd
)
1182 eventfd_signal(iocb
->ki_eventfd
);
1185 * We have to order our ring_info tail store above and test
1186 * of the wait list below outside the wait lock. This is
1187 * like in wake_up_bit() where clearing a bit has to be
1188 * ordered with the unlocked test.
1192 if (waitqueue_active(&ctx
->wait
)) {
1193 struct aio_waiter
*curr
, *next
;
1194 unsigned long flags
;
1196 spin_lock_irqsave(&ctx
->wait
.lock
, flags
);
1197 list_for_each_entry_safe(curr
, next
, &ctx
->wait
.head
, w
.entry
)
1198 if (avail
>= curr
->min_nr
) {
1199 wake_up_process(curr
->w
.private);
1200 list_del_init_careful(&curr
->w
.entry
);
1202 spin_unlock_irqrestore(&ctx
->wait
.lock
, flags
);
1206 static inline void iocb_put(struct aio_kiocb
*iocb
)
1208 if (refcount_dec_and_test(&iocb
->ki_refcnt
)) {
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
	struct aio_ring *ring;
	unsigned head, tail, pos;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_folios here is protected by ctx->ring_lock. */
	ring = folio_address(ctx->ring_folios[0]);

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

		struct io_event *ev;
		struct folio *folio;

		avail = (head <= tail ? tail : ctx->nr_events) - head;

		pos = head + AIO_EVENTS_OFFSET;
		folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);

		ev = folio_address(folio);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);

		if (unlikely(copy_ret)) {

	head %= ctx->nr_events;

	ring = folio_address(ctx->ring_folios[0]);
	flush_dcache_folio(ctx->ring_folios[0]);

	pr_debug("%li h%u t%u\n", ret, head, tail);

	mutex_unlock(&ctx->ring_lock);
static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (unlikely(atomic_read(&ctx->dead)))

	return ret < 0 || *i >= min_nr;

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
	struct hrtimer_sleeper	t;
	struct aio_waiter	w;
	long ret = 0, ret2 = 0;

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	aio_read_events(ctx, min_nr, nr, event, &ret);
	if (until == 0 || ret < 0 || ret >= min_nr)

	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	if (until != KTIME_MAX) {
		hrtimer_set_expires_range_ns(&t.timer, until, current->timer_slack_ns);
		hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);

		unsigned long nr_got = ret;

		w.min_nr = min_nr - ret;

		ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE);
		if (!ret2 && !t.task)

		if (aio_read_events(ctx, min_nr, nr, event, &ret) || ret2)

	finish_wait(&ctx->wait, &w.w);
	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);
/*
 * Create an aio_context capable of receiving at least nr_events.
 * ctxp must not point to an aio_context that already exists, and
 * must be initialized to 0 prior to the call.  On successful
 * creation of the aio_context, *ctxp is filled in with the resulting
 * handle.  May fail with -EINVAL if *ctxp is not initialized,
 * or if the specified nr_events exceeds internal limits.  May fail
 * with -EAGAIN if the specified nr_events exceeds the user's limit
 * of available events.  May fail with -ENOMEM if insufficient kernel
 * resources are available.  May fail with -EFAULT if an invalid
 * pointer is passed for ctxp.  Will fail with -ENOSYS if not implemented.
 */
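/*
 * Editor's illustrative sketch (not part of the original file): minimal
 * userspace setup of a context via the raw syscall, assuming <unistd.h>,
 * <sys/syscall.h> and <linux/aio_abi.h> are included:
 *
 *	aio_context_t ctx = 0;			// must be zero-initialized
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 *	...
 *	syscall(__NR_io_destroy, ctx);
 */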
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
	struct kioctx *ioctx = NULL;

	ret = get_user(ctx, ctxp);

	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
	struct kioctx *ioctx = NULL;

	ret = get_user(ctx, ctx32p);

	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		/* truncating is ok because it's a user address */
		ret = put_user((u32)ioctx->user_id, ctx32p);
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
/*
 * Destroy the aio_context specified.  May cancel any outstanding
 * AIOs and block on completion.  Will fail with -ENOSYS if not
 * implemented.  May fail with -EINVAL if the context pointed to
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct ctx_rq_wait wait;

		init_completion(&wait.comp);
		atomic_set(&wait.count, 1);

		/* Pass requests_done to kill_ioctx() where it can be set
		 * in a thread-safe way. If we try to set it here then we have
		 * a race condition if two io_destroy() calls are made simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &wait);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done. Otherwise kernel
		 * keeps using user-space buffers even if user thinks the context
		 */
			wait_for_completion(&wait.comp);

	pr_debug("EINVAL: invalid context id\n");
static void aio_remove_iocb(struct aio_kiocb *iocb)
	struct kioctx *ctx = iocb->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_del(&iocb->ki_list);
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);

static void aio_complete_rw(struct kiocb *kiocb, long res)
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);

	if (!list_empty_careful(&iocb->ki_list))
		aio_remove_iocb(iocb);

	if (kiocb->ki_flags & IOCB_WRITE) {
		struct inode *inode = file_inode(kiocb->ki_filp);

		if (S_ISREG(inode->i_mode))
			kiocb_end_write(kiocb);

	iocb->ki_res.res = res;
	iocb->ki_res.res2 = 0;

static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb, int rw_type)
	req->ki_complete = aio_complete_rw;
	req->private = NULL;
	req->ki_pos = iocb->aio_offset;
	req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
	if (iocb->aio_flags & IOCB_FLAG_RESFD)
		req->ki_flags |= IOCB_EVENTFD;
	if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
		/*
		 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
		 * aio_reqprio is interpreted as an I/O scheduling
		 * class and priority.
		 */
		ret = ioprio_check_cap(iocb->aio_reqprio);
			pr_debug("aio ioprio check cap error: %d\n", ret);

		req->ki_ioprio = iocb->aio_reqprio;
		req->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags, rw_type);

	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
		struct iovec **iovec, bool vectored, bool compat,
		struct iov_iter *iter)
	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
	size_t len = iocb->aio_nbytes;

		ssize_t ret = import_ubuf(rw, buf, len, iter);

	return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);

static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		req->ki_complete(req, ret);
static int aio_read(struct kiocb *req, const struct iocb *iocb,
			bool vectored, bool compat)
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;

	ret = aio_prep_rw(req, iocb, READ);
	file = req->ki_filp;
	if (unlikely(!(file->f_mode & FMODE_READ)))
	if (unlikely(!file->f_op->read_iter))

	ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
		aio_rw_done(req, file->f_op->read_iter(req, &iter));

static int aio_write(struct kiocb *req, const struct iocb *iocb,
			 bool vectored, bool compat)
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;

	ret = aio_prep_rw(req, iocb, WRITE);
	file = req->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_WRITE)))
	if (unlikely(!file->f_op->write_iter))

	ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
		if (S_ISREG(file_inode(file)->i_mode))
			kiocb_start_write(req);
		req->ki_flags |= IOCB_WRITE;
		aio_rw_done(req, file->f_op->write_iter(req, &iter));
static void aio_fsync_work(struct work_struct *work)
	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
	const struct cred *old_cred = override_creds(iocb->fsync.creds);

	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
	revert_creds(old_cred);
	put_cred(iocb->fsync.creds);

static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
			iocb->aio_rw_flags))

	if (unlikely(!req->file->f_op->fsync))

	req->creds = prepare_creds();

	req->datasync = datasync;
	INIT_WORK(&req->work, aio_fsync_work);
	schedule_work(&req->work);
static void aio_poll_put_work(struct work_struct *work)
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);

/*
 * Safely lock the waitqueue which the request is on, synchronizing with the
 * case where the ->poll() provider decides to free its waitqueue early.
 *
 * Returns true on success, meaning that req->head->lock was locked, req->wait
 * is on req->head, and an RCU read lock was taken.  Returns false if the
 * request was already removed from its waitqueue (which might no longer exist).
 */
static bool poll_iocb_lock_wq(struct poll_iocb *req)
	wait_queue_head_t *head;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us, then check whether the request is still on the queue.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	head = smp_load_acquire(&req->head);
		spin_lock(&head->lock);
		if (!list_empty(&req->wait.entry))
		spin_unlock(&head->lock);

static void poll_iocb_unlock_wq(struct poll_iocb *req)
	spin_unlock(&req->head->lock);
static void aio_poll_complete_work(struct work_struct *work)
	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	struct poll_table_struct pt = { ._key = req->events };
	struct kioctx *ctx = iocb->ki_ctx;

	if (!READ_ONCE(req->cancelled))
		mask = vfs_poll(req->file, &pt) & req->events;

	/*
	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
	 * synchronize with them.  In the cancellation case the list_del_init
	 * itself is not actually needed, but harmless so we keep it in to
	 * avoid further branches in the fast path.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	if (poll_iocb_lock_wq(req)) {
		if (!mask && !READ_ONCE(req->cancelled)) {
			/*
			 * The request isn't actually ready to be completed yet.
			 * Reschedule completion if another wakeup came in.
			 */
			if (req->work_need_resched) {
				schedule_work(&req->work);
				req->work_need_resched = false;
				req->work_scheduled = false;
			poll_iocb_unlock_wq(req);
			spin_unlock_irq(&ctx->ctx_lock);

		list_del_init(&req->wait.entry);
		poll_iocb_unlock_wq(req);
	} /* else, POLLFREE has freed the waitqueue, so we must complete */
	list_del_init(&iocb->ki_list);
	iocb->ki_res.res = mangle_poll(mask);
	spin_unlock_irq(&ctx->ctx_lock);

/* assumes we are called with irqs disabled */
static int aio_poll_cancel(struct kiocb *iocb)
	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
	struct poll_iocb *req = &aiocb->poll;

	if (poll_iocb_lock_wq(req)) {
		WRITE_ONCE(req->cancelled, true);
		if (!req->work_scheduled) {
			schedule_work(&aiocb->poll.work);
			req->work_scheduled = true;
		poll_iocb_unlock_wq(req);
	} /* else, the request was force-cancelled by POLLFREE already */
static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
	__poll_t mask = key_to_poll(key);
	unsigned long flags;

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & req->events))

	/*
	 * Complete the request inline if possible.  This requires that three
	 * conditions be met:
	 *   1. An event mask must have been passed.  If a plain wakeup was done
	 *	instead, then mask == 0 and we have to call vfs_poll() to get
	 *	the events, so inline completion isn't possible.
	 *   2. The completion work must not have already been scheduled.
	 *   3. ctx_lock must not be busy.  We have to use trylock because we
	 *	already hold the waitqueue lock, so this inverts the normal
	 *	locking order.  Use irqsave/irqrestore because not all
	 *	filesystems (e.g. fuse) call this function with IRQs disabled,
	 *	yet IRQs have to be disabled before ctx_lock is obtained.
	 */
	if (mask && !req->work_scheduled &&
	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
		struct kioctx *ctx = iocb->ki_ctx;

		list_del_init(&req->wait.entry);
		list_del(&iocb->ki_list);
		iocb->ki_res.res = mangle_poll(mask);
		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
			INIT_WORK(&req->work, aio_poll_put_work);
			schedule_work(&req->work);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);

		/*
		 * Schedule the completion work if needed.  If it was already
		 * scheduled, record that another wakeup came in.
		 *
		 * Don't remove the request from the waitqueue here, as it might
		 * not actually be complete yet (we won't know until vfs_poll()
		 * is called), and we must not miss any wakeups.  POLLFREE is an
		 * exception to this; see below.
		 */
		if (req->work_scheduled) {
			req->work_need_resched = true;
			schedule_work(&req->work);
			req->work_scheduled = true;

		/*
		 * If the waitqueue is being freed early but we can't complete
		 * the request inline, we have to tear down the request as best
		 * we can.  That means immediately removing the request from its
		 * waitqueue and preventing all further accesses to the
		 * waitqueue via the request.  We also need to schedule the
		 * completion work (done above).  Also mark the request as
		 * cancelled, to potentially skip an unneeded call to ->poll().
		 */
		if (mask & POLLFREE) {
			WRITE_ONCE(req->cancelled, true);
			list_del_init(&req->wait.entry);

			/*
			 * Careful: this *must* be the last step, since as soon
			 * as req->head is NULL'ed out, the request can be
			 * completed and freed, since aio_poll_complete_work()
			 * will no longer need to take the waitqueue lock.
			 */
			smp_store_release(&req->head, NULL);
struct aio_poll_table {
	struct poll_table_struct	pt;
	struct aio_kiocb		*iocb;

aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
		struct poll_table_struct *p)
	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);

	/* multiple wait queues per file are not supported */
	if (unlikely(pt->queued)) {
		pt->error = -EINVAL;

	pt->iocb->poll.head = head;
	add_wait_queue(head, &pt->iocb->poll.wait);

static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
	struct kioctx *ctx = aiocb->ki_ctx;
	struct poll_iocb *req = &aiocb->poll;
	struct aio_poll_table apt;
	bool cancel = false;

	/* reject any unknown events outside the normal event mask. */
	if ((u16)iocb->aio_buf != iocb->aio_buf)
	/* reject fields that are not defined for poll */
	if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)

	INIT_WORK(&req->work, aio_poll_complete_work);
	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;

	req->cancelled = false;
	req->work_scheduled = false;
	req->work_need_resched = false;

	apt.pt._qproc = aio_poll_queue_proc;
	apt.pt._key = req->events;
	apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */

	/* initialize the list so that we can do list_empty checks */
	INIT_LIST_HEAD(&req->wait.entry);
	init_waitqueue_func_entry(&req->wait, aio_poll_wake);

	mask = vfs_poll(req->file, &apt.pt) & req->events;
	spin_lock_irq(&ctx->ctx_lock);
	if (likely(apt.queued)) {
		bool on_queue = poll_iocb_lock_wq(req);

		if (!on_queue || req->work_scheduled) {
			/*
			 * aio_poll_wake() already either scheduled the async
			 * completion work, or completed the request inline.
			 */
			if (apt.error) /* unsupported case: multiple queues */
		if (mask || apt.error) {
			/* Steal to complete synchronously. */
			list_del_init(&req->wait.entry);
		} else if (cancel) {
			/* Cancel if possible (may be too late though). */
			WRITE_ONCE(req->cancelled, true);
		} else if (on_queue) {
			/*
			 * Actually waiting for an event, so add the request to
			 * active_reqs so that it can be cancelled if needed.
			 */
			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
			aiocb->ki_cancel = aio_poll_cancel;
			poll_iocb_unlock_wq(req);
	if (mask) { /* no async, we'd stolen it */
		aiocb->ki_res.res = mangle_poll(mask);
	spin_unlock_irq(&ctx->ctx_lock);
static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
			   struct iocb __user *user_iocb, struct aio_kiocb *req,
	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp))

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		struct eventfd_ctx *eventfd;
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
		if (IS_ERR(eventfd))
			return PTR_ERR(eventfd);

		req->ki_eventfd = eventfd;

	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
		pr_debug("EFAULT: aio_key\n");

	req->ki_res.obj = (u64)(unsigned long)user_iocb;
	req->ki_res.data = iocb->aio_data;
	req->ki_res.res = 0;
	req->ki_res.res2 = 0;

	switch (iocb->aio_lio_opcode) {
	case IOCB_CMD_PREAD:
		return aio_read(&req->rw, iocb, false, compat);
	case IOCB_CMD_PWRITE:
		return aio_write(&req->rw, iocb, false, compat);
	case IOCB_CMD_PREADV:
		return aio_read(&req->rw, iocb, true, compat);
	case IOCB_CMD_PWRITEV:
		return aio_write(&req->rw, iocb, true, compat);
	case IOCB_CMD_FSYNC:
		return aio_fsync(&req->fsync, iocb, false);
	case IOCB_CMD_FDSYNC:
		return aio_fsync(&req->fsync, iocb, true);
		return aio_poll(req, iocb);
		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
	struct aio_kiocb *req;

	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))

	/* enforce forwards compatibility on users */
	if (unlikely(iocb.aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");

	/* prevent overflows */
	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
	    ((ssize_t)iocb.aio_nbytes < 0)
		pr_debug("EINVAL: overflow check\n");

	req = aio_get_req(ctx);

	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);

	/* Done with the synchronous reference */

	/*
	 * If err is 0, we've either done aio_complete() ourselves or have
	 * arranged for that to be done asynchronously.  Anything non-zero
	 * means that we need to destroy req ourselves.
	 */
	if (unlikely(err)) {
		put_reqs_available(ctx, 1);
/*
 * Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 * the number of iocbs queued.  May return -EINVAL if the aio_context
 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
 * *iocbpp[0] is not properly initialized, or if the operation specified
 * is invalid for the file descriptor in the iocb.  May fail with
 * -EFAULT if any of the data structures point to invalid data.  May
 * fail with -EBADF if the file descriptor specified in the first
 * iocb is invalid.  May fail with -EAGAIN if insufficient resources
 * are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 * fail with -ENOSYS if not implemented.
 */
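/*
 * Editor's illustrative sketch (not part of the original file): submitting a
 * single IOCB_CMD_PREAD via the raw syscall, assuming ctx was obtained from
 * io_setup() and fd/buf/len are valid:
 *
 *	struct iocb cb = { 0 };
 *	struct iocb *cbs[1] = { &cb };
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = len;
 *	cb.aio_offset     = 0;
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */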
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
	struct blk_plug plug;

	if (unlikely(nr < 0))

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {

		ret = io_submit_one(ctx, user_iocb, false);
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
		       int, nr, compat_uptr_t __user *, iocbpp)
	struct blk_plug plug;

	if (unlikely(nr < 0))

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");

	if (nr > ctx->nr_events)
		nr = ctx->nr_events;

	if (nr > AIO_PLUG_THRESHOLD)
		blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		compat_uptr_t user_iocb;

		if (unlikely(get_user(user_iocb, iocbpp + i))) {

		ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
	if (nr > AIO_PLUG_THRESHOLD)
		blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
/*
 * Attempts to cancel an iocb previously passed to io_submit.  If
 * the operation is successfully cancelled, the resulting event is
 * copied into the memory pointed to by result without being placed
 * into the completion queue and 0 is returned.  May fail with
 * -EFAULT if any of the data structures pointed to are invalid.
 * May fail with -EINVAL if aio_context specified by ctx_id is
 * invalid.  May fail with -EAGAIN if the iocb specified was not
 * cancelled.  Will fail with -ENOSYS if not implemented.
 */
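/*
 * Editor's illustrative sketch (not part of the original file): cancelling a
 * previously submitted iocb. As the code below notes, the result argument is
 * no longer filled in; the completion is delivered via the ring instead:
 *
 *	struct io_event ev;
 *	if (syscall(__NR_io_cancel, ctx, &cb, &ev) < 0 && errno == EINPROGRESS)
 *		;	// cancellation underway, reap the event with io_getevents()
 */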
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
	struct aio_kiocb *kiocb;
	u64 obj = (u64)(unsigned long)iocb;

	if (unlikely(get_user(key, &iocb->aio_key)))
	if (unlikely(key != KIOCB_KEY))

	ctx = lookup_ioctx(ctx_id);

	spin_lock_irq(&ctx->ctx_lock);
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_res.obj == obj) {
			ret = kiocb->ki_cancel(&kiocb->rw);
			list_del_init(&kiocb->ki_list);
	spin_unlock_irq(&ctx->ctx_lock);

		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer.  -EINPROGRESS indicates
		 * cancellation is in progress:
		 */

	percpu_ref_put(&ctx->users);
static long do_io_getevents(aio_context_t ctx_id,
		struct io_event __user *events,
		struct timespec64 *ts)
	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	struct kioctx *ioctx = lookup_ioctx(ctx_id);

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, until);
		percpu_ref_put(&ioctx->users);
/*
 * Attempts to read at least min_nr events and up to nr events from
 * the completion queue for the aio_context specified by ctx_id. If
 * it succeeds, the number of read events is returned. May fail with
 * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 * out of range, if timeout is out of range.  May fail with -EFAULT
 * if any of the memory specified is invalid.  May return 0 or
 * < min_nr if the timeout specified by timeout has elapsed
 * before sufficient events are available, where timeout == NULL
 * specifies an infinite timeout. Note that the timeout pointed to by
 * timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
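/*
 * Editor's illustrative sketch (not part of the original file): reaping
 * completions with a relative timeout, assuming ctx from io_setup() and a
 * hypothetical handle() helper:
 *
 *	struct io_event events[8];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };	// relative
 *	int n = syscall(__NR_io_getevents, ctx, 1, 8, events, &ts);
 *	for (int i = 0; i < n; i++)
 *		handle(events[i].res);
 */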
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout)
	struct timespec64	ts;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	if (!ret && signal_pending(current))

struct __aio_sigset {
	const sigset_t __user	*sigmask;

SYSCALL_DEFINE6(io_pgetevents,
		aio_context_t, ctx_id,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(io_pgetevents_time32,
		aio_context_t, ctx_id,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout,
		const struct __aio_sigset __user *, usig)
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;

	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

#if defined(CONFIG_COMPAT_32BIT_TIME)

SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout)
	struct timespec64 t;

	if (timeout && get_old_timespec32(&t, timeout))

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	if (!ret && signal_pending(current))

#ifdef CONFIG_COMPAT

struct __compat_aio_sigset {
	compat_uptr_t		sigmask;
	compat_size_t		sigsetsize;

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       struct io_event __user *, events,
		       struct old_timespec32 __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;

	if (timeout && get_old_timespec32(&t, timeout))

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       struct io_event __user *, events,
		       struct __kernel_timespec __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;

	if (timeout && get_timespec64(&t, timeout))

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;