/*
 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <unistd.h>
#include <sched.h>
#include <signal.h>
#include <errno.h>
#include <sys/time.h>
#include <asm/unistd.h>
#include "aio.h"
#include "init.h"
#include "kern_util.h"
#include "os.h"

struct aio_thread_req {
	enum aio_type type;
	int io_fd;
	unsigned long long offset;
	char *buf;
	int len;
	struct aio_context *aio;
};

#if defined(HAVE_AIO_ABI)
#include <linux/aio_abi.h>

/*
 * If we have the headers, we are going to build with AIO enabled.
 * If we don't have aio in libc, we define the necessary stubs here.
 */

#if !defined(HAVE_AIO_LIBC)

static long io_setup(int n, aio_context_t *ctxp)
{
	return syscall(__NR_io_setup, n, ctxp);
}

static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	return syscall(__NR_io_submit, ctx, nr, iocbpp);
}

static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
			 struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
}

#endif
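
/*
 * Illustrative sketch (not part of this file's logic; the names below are
 * local to this comment): the wrappers above are used the same way the
 * libc versions would be - create a context once, queue iocbs against it,
 * then reap completions.  init_aio_26(), do_aio() and aio_thread() below
 * follow this pattern with a 256-slot context:
 *
 *	aio_context_t example_ctx = 0;
 *	struct iocb cb = { .aio_lio_opcode	= IOCB_CMD_PREAD,
 *			   .aio_fildes		= fd,
 *			   .aio_buf		= (unsigned long) buf,
 *			   .aio_nbytes		= len,
 *			   .aio_offset		= offset };
 *	struct iocb *cbs[] = { &cb };
 *	struct io_event ev;
 *
 *	io_setup(1, &example_ctx);
 *	io_submit(example_ctx, 1, cbs);
 *	io_getevents(example_ctx, 1, 1, &ev, NULL);
 */
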
/*
 * The AIO_MMAP cases force the mmapped page into memory here
 * rather than in whatever place first touches the data.  I used
 * to do this by touching the page, but that's delicate because
 * gcc is prone to optimizing that away.  So, what's done here
 * is we read from the descriptor from which the page was
 * mapped.  The caller is required to pass an offset which is
 * inside the page that was mapped.  Thus, when the read
 * returns, we know that the page is in the page cache, and
 * that it now backs the mmapped area.
 */
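
/*
 * Caller-side sketch (illustrative only; the variable names and the mmap
 * parameters are hypothetical, not taken from this file): a user of
 * submit_aio() that wants a mapped page faulted into the page cache
 * passes AIO_MMAP with an offset that lies inside that page:
 *
 *	void *map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd,
 *			 page_offset);
 *	...
 *	submit_aio(AIO_MMAP, fd, map, page_size, page_offset, reply_fd,
 *		   aio);
 *
 * do_aio() (and do_not_aio() on the 2.4 path) then reads one byte from
 * fd at that offset, which is what actually pulls the page in.
 */
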
static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
		  int len, unsigned long long offset, struct aio_context *aio)
{
	struct iocb *iocbp = & ((struct iocb) {
				 .aio_data	= (unsigned long) aio,
				 .aio_fildes	= fd,
				 .aio_buf	= (unsigned long) buf,
				 .aio_nbytes	= len,
				 .aio_offset	= offset
			 });
	char c;

	switch (type) {
	case AIO_READ:
		iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
		break;
	case AIO_WRITE:
		iocbp->aio_lio_opcode = IOCB_CMD_PWRITE;
		break;
	case AIO_MMAP:
		/* See the comment above - read one byte to fault the page in */
		iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
		iocbp->aio_buf = (unsigned long) &c;
		iocbp->aio_nbytes = sizeof(c);
		break;
	default:
		printk(UM_KERN_ERR "Bogus op in do_aio - %d\n", type);
		return -EINVAL;
	}

	return (io_submit(ctx, 1, &iocbp) > 0) ? 0 : -errno;
}

/* Initialized in an initcall and unchanged thereafter */
static aio_context_t ctx = 0;

static int aio_thread(void *arg)
{
	struct aio_thread_reply reply;
	struct io_event event;
	int err, n, reply_fd;

	signal(SIGWINCH, SIG_IGN);
	while (1) {
		n = io_getevents(ctx, 1, 1, &event, NULL);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			printk(UM_KERN_ERR "aio_thread - io_getevents failed, "
			       "errno = %d\n", errno);
		} else {
			/* The iocb's aio_data field carries the aio_context
			 * pointer back to us */
			reply = ((struct aio_thread_reply)
				{ .data	= (void *) (long) event.data,
				  .err	= event.res });
			reply_fd = ((struct aio_context *) reply.data)->reply_fd;
			err = write(reply_fd, &reply, sizeof(reply));
			if (err != sizeof(reply))
				printk(UM_KERN_ERR "aio_thread - write failed, "
				       "fd = %d, err = %d\n", reply_fd, errno);
		}
	}
	return 0;
}

#endif

static int do_not_aio(struct aio_thread_req *req)
{
	char c;
	unsigned long long actual;
	int n;

	actual = lseek64(req->io_fd, req->offset, SEEK_SET);
	if (actual != req->offset)
		return -errno;

	switch (req->type) {
	case AIO_READ:
		n = read(req->io_fd, req->buf, req->len);
		break;
	case AIO_WRITE:
		n = write(req->io_fd, req->buf, req->len);
		break;
	case AIO_MMAP:
		/* As in do_aio - a one byte read to fault the page in */
		n = read(req->io_fd, &c, sizeof(c));
		break;
	default:
		printk(UM_KERN_ERR "do_not_aio - bad request type : %d\n",
		       req->type);
		return -EINVAL;
	}

	if (n < 0)
		return -errno;
	return 0;
}

/* These are initialized in initcalls and not changed */
static int aio_req_fd_r = -1;
static int aio_req_fd_w = -1;
static int aio_pid = -1;
static unsigned long aio_stack;

static int not_aio_thread(void *arg)
{
	struct aio_thread_req req;
	struct aio_thread_reply reply;
	int err;

	signal(SIGWINCH, SIG_IGN);
	while (1) {
		err = read(aio_req_fd_r, &req, sizeof(req));
		if (err != sizeof(req)) {
			if (err < 0)
				printk(UM_KERN_ERR "not_aio_thread - "
				       "read failed, fd = %d, err = %d\n",
				       aio_req_fd_r, errno);
			else
				printk(UM_KERN_ERR "not_aio_thread - short "
				       "read, fd = %d, length = %d\n",
				       aio_req_fd_r, err);
			continue;
		}
		err = do_not_aio(&req);
		reply = ((struct aio_thread_reply) { .data	= req.aio,
						     .err	= err });
		err = write(req.aio->reply_fd, &reply, sizeof(reply));
		if (err != sizeof(reply))
			printk(UM_KERN_ERR "not_aio_thread - write failed, "
			       "fd = %d, err = %d\n", req.aio->reply_fd,
			       errno);
	}

	return 0;
}

static int init_aio_24(void)
{
	int fds[2], err;

	err = os_pipe(fds, 1, 1);
	if (err)
		goto out;

	aio_req_fd_w = fds[0];
	aio_req_fd_r = fds[1];

	err = os_set_fd_block(aio_req_fd_w, 0);
	if (err)
		goto out_close_pipe;

	err = run_helper_thread(not_aio_thread, NULL,
				CLONE_FILES | CLONE_VM, &aio_stack);
	if (err < 0)
		goto out_close_pipe;

	aio_pid = err;
	goto out;

out_close_pipe:
	close(fds[0]);
	close(fds[1]);
	aio_req_fd_w = -1;
	aio_req_fd_r = -1;
out:
#ifndef HAVE_AIO_ABI
	printk(UM_KERN_INFO "/usr/include/linux/aio_abi.h not present during "
	       "build\n");
#endif
	printk(UM_KERN_INFO "2.6 host AIO support not used - falling back to "
	       "I/O thread\n");
	return 0;
}

#ifdef HAVE_AIO_ABI
#define DEFAULT_24_AIO 0

static int init_aio_26(void)
{
	int err;

	if (io_setup(256, &ctx)) {
		err = -errno;
		printk(UM_KERN_ERR "aio_thread failed to initialize context, "
		       "err = %d\n", errno);
		return err;
	}

	err = run_helper_thread(aio_thread, NULL,
				CLONE_FILES | CLONE_VM, &aio_stack);
	if (err < 0)
		return err;

	aio_pid = err;

	printk(UM_KERN_INFO "Using 2.6 host AIO\n");
	return 0;
}

static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
			 unsigned long long offset, struct aio_context *aio)
{
	struct aio_thread_reply reply;
	int err;

	err = do_aio(ctx, type, io_fd, buf, len, offset, aio);
	if (err) {
		/* The submission failed, so no completion event will ever
		 * arrive - send the error back on the reply descriptor
		 * ourselves */
		reply = ((struct aio_thread_reply) { .data	= aio,
						     .err	= err });
		err = write(aio->reply_fd, &reply, sizeof(reply));
		if (err != sizeof(reply)) {
			err = -errno;
			printk(UM_KERN_ERR "submit_aio_26 - write failed, "
			       "fd = %d, err = %d\n", aio->reply_fd, -err);
		} else
			err = 0;
	}

	return err;
}

#else
#define DEFAULT_24_AIO 1

static int init_aio_26(void)
{
	return -ENOSYS;
}

static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
			 unsigned long long offset, struct aio_context *aio)
{
	return -ENOSYS;
}
#endif

/* Initialized in an initcall and unchanged thereafter */
static int aio_24 = DEFAULT_24_AIO;

static int __init set_aio_24(char *name, int *add)
{
	aio_24 = 1;
	return 0;
}

__uml_setup("aio=2.4", set_aio_24,
"aio=2.4\n"
"    This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n"
"    available.  2.4 AIO is a single thread that handles one request at a\n"
"    time, synchronously.  2.6 AIO is a thread which uses the 2.6 AIO\n"
"    interface to handle an arbitrary number of pending requests.  2.6 AIO\n"
"    is not available in tt mode, on 2.4 hosts, or when UML is built with\n"
"    /usr/include/linux/aio_abi.h not available.  Many distributions don't\n"
"    include aio_abi.h, so you will need to copy it from a kernel tree to\n"
"    your /usr/include/linux in order to build an AIO-capable UML\n\n"
);
static int init_aio(void)
{
	int err;

	if (!aio_24) {
		err = init_aio_26();
		if (err && (errno == ENOSYS)) {
			printk(UM_KERN_INFO "2.6 AIO not supported on the "
			       "host - reverting to 2.4 AIO\n");
			aio_24 = 1;
		} else
			return err;
	}
	if (aio_24)
		return init_aio_24();
	return 0;
}

/*
 * The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
 * needs to be called when the kernel is running because it calls run_helper,
 * which needs get_free_page.  exit_aio is a __uml_exitcall because the generic
 * kernel does not run __exitcalls on shutdown, and can't because many of them
 * break when called outside of module unloading.
 */
__initcall(init_aio);

static void exit_aio(void)
{
	if (aio_pid != -1) {
		os_kill_process(aio_pid, 1);
		free_stack(aio_stack, 0);
	}
}

__uml_exitcall(exit_aio);

static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len,
			 unsigned long long offset, struct aio_context *aio)
{
	struct aio_thread_req req = { .type	= type,
				      .io_fd	= io_fd,
				      .offset	= offset,
				      .buf	= buf,
				      .len	= len,
				      .aio	= aio,
	};
	int err;

	err = write(aio_req_fd_w, &req, sizeof(req));
	if (err == sizeof(req))
		err = 0;
	else
		err = -errno;

	return err;
}

int submit_aio(enum aio_type type, int io_fd, char *buf, int len,
	       unsigned long long offset, int reply_fd,
	       struct aio_context *aio)
{
	aio->reply_fd = reply_fd;
	if (aio_24)
		return submit_aio_24(type, io_fd, buf, len, offset, aio);
	else
		return submit_aio_26(type, io_fd, buf, len, offset, aio);
}