/*
 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */
#include <sys/syscall.h>
static int aio_req_fd_r = -1;
static int aio_req_fd_w = -1;
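/* Fold the host's result for a completed request back into the aio_context.
 * A zero-byte result on an AIO_READ is the EOF case handled below.
 */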
static int update_aio(struct aio_context *aio, int res)
	else if((res == 0) && (aio->type == AIO_READ)){
		/* This is the EOF case - we have hit the end of the file
		 * and it ends in a partial block, so we fill the end of
		 * the block with zeros and claim success.
		 */
		memset(aio->data, 0, aio->len);
#if defined(HAVE_AIO_ABI)
#include <linux/aio_abi.h>
/* If we have the headers, we are going to build with AIO enabled.
 * If we don't have aio in libc, we define the necessary stubs here.
 */

#if !defined(HAVE_AIO_LIBC)
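/* Thin wrappers around the raw 2.6 AIO system calls (io_setup, io_submit,
 * io_getevents) for hosts whose libc does not provide them.
 */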
static long io_setup(int n, aio_context_t *ctxp)
{
	return syscall(__NR_io_setup, n, ctxp);
}
static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	return syscall(__NR_io_submit, ctx, nr, iocbpp);
}
static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
			 struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
}
/* The AIO_MMAP cases force the mmapped page into memory here
 * rather than in whatever place first touches the data.  I used
 * to do this by touching the page, but that's delicate because
 * gcc is prone to optimizing that away.  So, what's done here
 * is we read from the descriptor from which the page was
 * mapped.  The caller is required to pass an offset which is
 * inside the page that was mapped.  Thus, when the read
 * returns, we know that the page is in the page cache, and
 * that it now backs the mmapped area.
 */
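/* Build an iocb describing the request and hand it to the host with
 * io_submit().  The AIO_MMAP case issues a small read from the mapped
 * file, as described above.
 */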
static int do_aio(aio_context_t ctx, struct aio_context *aio)
	struct iocb iocb, *iocbp = &iocb;
	iocb = ((struct iocb) { .aio_data	= (unsigned long) aio,
				.aio_fildes	= aio->fd,
				.aio_buf	= (unsigned long) aio->data,
				.aio_nbytes	= aio->len,
				.aio_offset	= aio->offset,
				.aio_reserved3	= 0 });
	switch(aio->type){
	case AIO_READ:
		iocb.aio_lio_opcode = IOCB_CMD_PREAD;
		break;
	case AIO_WRITE:
		iocb.aio_lio_opcode = IOCB_CMD_PWRITE;
		break;
	case AIO_MMAP:
		iocb.aio_lio_opcode = IOCB_CMD_PREAD;
		iocb.aio_buf = (unsigned long) &c;
		iocb.aio_nbytes = sizeof(c);
		break;
	default:
		printk("Bogus op in do_aio - %d\n", aio->type);
	}
	err = io_submit(ctx, 1, &iocbp);
static aio_context_t ctx = 0;
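/* The 2.6 event loop: reap completions with io_getevents() and send an
 * aio_thread_reply back to the requester over aio->reply_fd.
 */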
static int aio_thread(void *arg)
	struct aio_thread_reply reply;
	struct aio_context *aio;
	struct io_event event;
	signal(SIGWINCH, SIG_IGN);
		n = io_getevents(ctx, 1, 1, &event, NULL);
			printk("aio_thread - io_getevents failed, "
			       "errno = %d\n", errno);
			aio = (struct aio_context *) event.data;
			if(update_aio(aio, event.res)){
				reply = ((struct aio_thread_reply)
			err = os_write_file(aio->reply_fd, &reply,
			if(err != sizeof(reply))
				printk("aio_thread - write failed, "
				       "fd = %d, err = %d\n", aio->reply_fd,
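/* 2.4 fallback - carry out a single request synchronously with a seek
 * followed by an ordinary read or write (or, for AIO_MMAP, a token read).
 */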
static int do_not_aio(struct aio_context *aio)
	switch(aio->type){
	case AIO_READ:
		err = os_seek_file(aio->fd, aio->offset);
		err = os_read_file(aio->fd, aio->data, aio->len);
		break;
	case AIO_WRITE:
		err = os_seek_file(aio->fd, aio->offset);
		err = os_write_file(aio->fd, aio->data, aio->len);
		break;
	case AIO_MMAP:
		err = os_seek_file(aio->fd, aio->offset);
		err = os_read_file(aio->fd, &c, sizeof(c));
		break;
	default:
		printk("do_not_aio - bad request type : %d\n", aio->type);
	}
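/* The 2.4 fallback thread: pull aio_context pointers off the request pipe,
 * service each one with do_not_aio(), and write the reply back.
 */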
static int not_aio_thread(void *arg)
	struct aio_context *aio;
	struct aio_thread_reply reply;
	signal(SIGWINCH, SIG_IGN);
		err = os_read_file(aio_req_fd_r, &aio, sizeof(aio));
		if(err != sizeof(aio)){
				printk("not_aio_thread - read failed, "
				       "fd = %d, err = %d\n", aio_req_fd_r,
			printk("not_aio_thread - short read, fd = %d, "
			       "length = %d\n", aio_req_fd_r, err);
		err = do_not_aio(aio);
		if(update_aio(aio, err))
		reply = ((struct aio_thread_reply) { .data = aio,
		err = os_write_file(aio->reply_fd, &reply, sizeof(reply));
		if(err != sizeof(reply))
			printk("not_aio_thread - write failed, fd = %d, "
			       "err = %d\n", aio_req_fd_r, -err);
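/* Queue a request for not_aio_thread by writing its pointer down the pipe. */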
static int submit_aio_24(struct aio_context *aio)
	err = os_write_file(aio_req_fd_w, &aio, sizeof(aio));
	if(err == sizeof(aio))
static int aio_pid = -1;
static int (*submit_proc)(struct aio_context *aio);
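/* Set up the request pipe and start not_aio_thread as a helper that shares
 * files and address space with the kernel (CLONE_FILES | CLONE_VM).
 */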
static int init_aio_24(void)
	err = os_pipe(fds, 1, 1);
	aio_req_fd_w = fds[0];
	aio_req_fd_r = fds[1];
	err = run_helper_thread(not_aio_thread, NULL,
				CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
	os_close_file(fds[0]);
	os_close_file(fds[1]);
	printk("/usr/include/linux/aio_abi.h not present during build\n");
	printk("2.6 host AIO support not used - falling back to I/O "
	submit_proc = submit_aio_24;
#define DEFAULT_24_AIO 0
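/* 2.6 path - submit the request to the host AIO context with do_aio();
 * failures are reported back to the requester over aio->reply_fd.
 */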
static int submit_aio_26(struct aio_context *aio)
	struct aio_thread_reply reply;
	err = do_aio(ctx, aio);
		reply = ((struct aio_thread_reply) { .data = aio,
		err = os_write_file(aio->reply_fd, &reply, sizeof(reply));
		if(err != sizeof(reply))
			printk("submit_aio_26 - write failed, "
			       "fd = %d, err = %d\n", aio->reply_fd, -err);
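/* Create the host AIO context (up to 256 outstanding events) and start
 * aio_thread to reap completions.
 */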
static int init_aio_26(void)
	if(io_setup(256, &ctx)){
		printk("aio_thread failed to initialize context, err = %d\n",
	err = run_helper_thread(aio_thread, NULL,
				CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
	printk("Using 2.6 host AIO\n");
	submit_proc = submit_aio_26;
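/* Built without <linux/aio_abi.h>: only the 2.4-style fallback is
 * available, so the 2.6 entry points are reduced to stubs.
 */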
#define DEFAULT_24_AIO 1
static int submit_aio_26(struct aio_context *aio)
static int init_aio_26(void)
	submit_proc = submit_aio_26;
static int aio_24 = DEFAULT_24_AIO;
static int __init set_aio_24(char *name, int *add)
__uml_setup("aio=2.4", set_aio_24,
" This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n"
" available. 2.4 AIO is a single thread that handles one request at a\n"
" time, synchronously. 2.6 AIO is a thread which uses the 2.6 AIO\n"
" interface to handle an arbitrary number of pending requests. 2.6 AIO\n"
" is not available in tt mode, on 2.4 hosts, or when UML is built with\n"
" /usr/include/linux/aio_abi.h not available. Many distributions don't\n"
" include aio_abi.h, so you will need to copy it from a kernel tree to\n"
" your /usr/include/linux in order to build an AIO-capable UML.\n\n"
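/* Pick the submission mechanism at boot: try 2.6 host AIO unless "aio=2.4"
 * was given or the host lacks support, otherwise fall back to the 2.4 path.
 */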
static int init_aio(void)
		printk("Disabling 2.6 AIO in tt mode\n");
	if(err && (errno == ENOSYS)){
		printk("2.6 AIO not supported on the host - "
		       "reverting to 2.4 AIO\n");
		return init_aio_24();
/* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
 * needs to be called when the kernel is running because it calls run_helper,
 * which needs get_free_page.  exit_aio is a __uml_exitcall because the generic
 * kernel does not run __exitcalls on shutdown, and can't because many of them
 * break when called outside of module unloading.
 */
__initcall(init_aio);
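/* Kill whichever helper thread init_aio started. */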
static void exit_aio(void)
	os_kill_process(aio_pid, 1);
__uml_exitcall(exit_aio);
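/* External entry point - dispatch through whichever submit routine
 * init_aio selected.
 */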
int submit_aio(struct aio_context *aio)
	return (*submit_proc)(aio);