1 /* libbdev - block device interfacing library, by D.C. van Moolenbroek */
3 #include <minix/drivers.h>
4 #include <minix/bdev.h>
5 #include <minix/ioctl.h>
/* bdev_driver: associate a driver (by label) with the major device of 'dev',
 * delegating to bdev_update(). Uses a static 'first' flag for one-time
 * initialization. NOTE(review): this block is a garbled extraction — the
 * function braces, the body of the first-time-init branch, and the reset of
 * 'first' are missing; recover from the upstream file before compiling. */
12 void bdev_driver(dev_t dev
, char *label
)
14 /* Associate a driver with the given (major) device, using its endpoint.
15 * File system usage note: typically called from mount and newdriver.
static int first
= TRUE
;
20 /* Initialize the driver endpoint array. */
/* NOTE(review): the statements guarded by 'first' were dropped by the
 * extraction; presumably they initialize per-driver state — TODO confirm. */
26 bdev_update(dev
, label
);
/* bdev_retry: decide whether a failed call should be retried. Increments
 * *driver_tries on driver-restart errors (capping at DRIVER_TRIES and
 * rewriting *result to EDEADSRCDST) and *transfer_tries on transfer I/O
 * failures (capping at TRANSFER_TRIES; transfer_tries may be NULL for
 * non-transfer requests). NOTE(review): garbled extraction — the switch on
 * *result, TRUE/FALSE returns, and closing braces are missing. */
29 static int bdev_retry(int *driver_tries
, int *transfer_tries
, int *result
)
31 /* Return TRUE iff the call result implies that we should retry the operation.
36 /* We get this error internally if the driver has restarted and the
37 * current operation may now go through. Check the retry count for
38 * driver restarts first, as we don't want to keep trying forever.
40 if (++*driver_tries
< DRIVER_TRIES
)
43 *result
= EDEADSRCDST
;
48 /* The 'transfer_tries' pointer is non-NULL if this was a transfer
49 * request. If we get back an I/O failure, keep retrying the request
50 * until we hit the transfer retry limit.
52 if (transfer_tries
!= NULL
&& ++*transfer_tries
< TRANSFER_TRIES
)
/* bdev_opcl: common worker for open/close. Builds a message addressed to
 * minor(dev) with the requested access bits, sends it synchronously via
 * bdev_sendrec(), and retries on driver restart via bdev_retry() (NULL
 * transfer_tries: not a transfer request). NOTE(review): garbled extraction —
 * the 'message m' declaration, the do-loop opener, the m_type = req
 * assignment (presumably) and the final return are missing. */
61 static int bdev_opcl(int req
, dev_t dev
, int bits
)
63 /* Open or close the given minor device.
66 int r
, driver_tries
= 0;
69 memset(&m
, 0, sizeof(m
));
71 m
.m_lbdev_lblockdriver_msg
.minor
= minor(dev
);
72 m
.m_lbdev_lblockdriver_msg
.access
= bits
;
74 r
= bdev_sendrec(dev
, &m
);
75 } while (bdev_retry(&driver_tries
, NULL
, &r
));
/* bdev_open: open the minor device via bdev_opcl(BDEV_OPEN) and, on success
 * (presumably — the guarding condition was dropped by extraction), record the
 * open with bdev_minor_add(dev, bits). NOTE(review): 'int r' declaration,
 * braces and final return are missing from this fragment. */
80 int bdev_open(dev_t dev
, int bits
)
82 /* Open the given minor device.
83 * File system usage note: typically called from mount, after bdev_driver.
87 r
= bdev_opcl(BDEV_OPEN
, dev
, bits
);
90 bdev_minor_add(dev
, bits
);
/* bdev_close: flush all pending asynchronous I/O for the minor device first
 * (bdev_flush_asyn blocks until completion), then issue BDEV_CLOSE with zero
 * access bits. NOTE(review): fragment — declarations, the bdev_minor_del
 * bookkeeping (presumably) and the return are missing. */
95 int bdev_close(dev_t dev
)
97 /* Close the given minor device.
98 * File system usage note: typically called from unmount.
102 bdev_flush_asyn(dev
);
104 r
= bdev_opcl(BDEV_CLOSE
, dev
, 0);
/* bdev_rdwt_setup: prepare the message for a single-buffer BDEV_READ or
 * BDEV_WRITE. Looks up the driver endpoint, creates a direct memory grant on
 * 'buf' (CPF_WRITE for reads — the driver writes into our buffer — CPF_READ
 * for writes), and fills in minor, pos, count, grant and flags fields.
 * NOTE(review): fragment — local declarations (endpt, perm, grant), the
 * m_type = req assignment (presumably), error returns (EDEADSRCDST on NONE
 * endpoint, ENOMEM on grant failure — TODO confirm) and OK return are
 * missing. */
112 static int bdev_rdwt_setup(int req
, dev_t dev
, u64_t pos
, char *buf
,
113 size_t count
, int flags
, message
*m
)
115 /* Set up a single-buffer read/write request.
/* Reject counts that would not fit in a non-negative ssize_t result. */
121 assert((ssize_t
) count
>= 0);
123 if ((endpt
= bdev_driver_get(dev
)) == NONE
)
126 perm
= (req
== BDEV_READ
) ? CPF_WRITE
: CPF_READ
;
128 grant
= cpf_grant_direct(endpt
, (vir_bytes
) buf
, count
, perm
);
130 if (!GRANT_VALID(grant
)) {
131 printf("bdev: unable to allocate grant!\n");
135 memset(m
, 0, sizeof(*m
));
137 m
->m_lbdev_lblockdriver_msg
.minor
= minor(dev
);
138 m
->m_lbdev_lblockdriver_msg
.pos
= pos
;
139 m
->m_lbdev_lblockdriver_msg
.count
= count
;
140 m
->m_lbdev_lblockdriver_msg
.grant
= grant
;
141 m
->m_lbdev_lblockdriver_msg
.flags
= flags
;
/* bdev_rdwt_cleanup: revoke the single memory grant that bdev_rdwt_setup
 * stored in the message, releasing the driver's access to the caller's
 * buffer. NOTE(review): fragment — braces dropped by extraction. */
146 static void bdev_rdwt_cleanup(const message
*m
)
148 /* Clean up a single-buffer read/write request.
151 cpf_revoke(m
->m_lbdev_lblockdriver_msg
.grant
);
/* bdev_rdwt: synchronous single-buffer read/write. Loops: set up grants and
 * message, send with bdev_sendrec, revoke grants, and let bdev_retry decide
 * whether to re-issue (tracking both driver restarts and transfer I/O
 * failures). Returns byte count or negative error. NOTE(review): fragment —
 * 'message m' declaration, the do-loop opener, the early-error return and the
 * final 'return r' are missing. */
154 static ssize_t
bdev_rdwt(int req
, dev_t dev
, u64_t pos
, char *buf
,
155 size_t count
, int flags
)
157 /* Perform a synchronous read or write call using a single buffer.
160 int r
, driver_tries
= 0, transfer_tries
= 0;
163 if ((r
= bdev_rdwt_setup(req
, dev
, pos
, buf
, count
, flags
, &m
)) != OK
)
166 r
= bdev_sendrec(dev
, &m
);
/* Grants must be revoked after every attempt; setup re-creates them. */
168 bdev_rdwt_cleanup(&m
);
169 } while (bdev_retry(&driver_tries
, &transfer_tries
, &r
));
/* bdev_vrdwt_setup: prepare a vectored (scatter/gather) transfer request.
 * For each of the at-most-NR_IOREQS iovec entries, create a direct grant on
 * the element's buffer (CPF_WRITE for BDEV_GATHER reads, CPF_READ for
 * scatter writes) and mirror it into the grant vector 'gvec'; then grant the
 * driver read access to 'gvec' itself and fill in the message fields. On any
 * grant failure all previously created grants are revoked in reverse order.
 * NOTE(review): fragment — local declarations (i, endpt, perm, grant, size),
 * the perm argument of the per-element cpf_grant_direct call, error returns,
 * m_type assignment (presumably) and OK return are missing. */
174 static int bdev_vrdwt_setup(int req
, dev_t dev
, u64_t pos
, iovec_t
*vec
,
175 int count
, int flags
, message
*m
, iovec_s_t
*gvec
)
177 /* Set up a vectored read/write request.
184 assert(count
<= NR_IOREQS
);
186 if ((endpt
= bdev_driver_get(dev
)) == NONE
)
189 perm
= (req
== BDEV_GATHER
) ? CPF_WRITE
: CPF_READ
;
192 for (i
= 0; i
< count
; i
++) {
193 grant
= cpf_grant_direct(endpt
, vec
[i
].iov_addr
, vec
[i
].iov_size
,
196 if (!GRANT_VALID(grant
)) {
197 printf("bdev: unable to allocate grant!\n");
/* Unwind: revoke every grant created so far, newest first. */
199 for (i
--; i
>= 0; i
--)
200 cpf_revoke(gvec
[i
].iov_grant
);
205 gvec
[i
].iov_grant
= grant
;
206 gvec
[i
].iov_size
= vec
[i
].iov_size
;
/* Each element must be non-empty and the running total must not
 * overflow a signed ssize_t. */
208 assert(vec
[i
].iov_size
> 0);
209 assert((ssize_t
) (size
+ vec
[i
].iov_size
) > size
);
211 size
+= vec
[i
].iov_size
;
/* Grant the driver access to the grant vector itself. */
214 grant
= cpf_grant_direct(endpt
, (vir_bytes
) gvec
, sizeof(gvec
[0]) * count
,
217 if (!GRANT_VALID(grant
)) {
218 printf("bdev: unable to allocate grant!\n");
220 for (i
= count
- 1; i
>= 0; i
--)
221 cpf_revoke(gvec
[i
].iov_grant
);
226 memset(m
, 0, sizeof(*m
));
228 m
->m_lbdev_lblockdriver_msg
.minor
= minor(dev
);
229 m
->m_lbdev_lblockdriver_msg
.pos
= pos
;
230 m
->m_lbdev_lblockdriver_msg
.count
= count
;
231 m
->m_lbdev_lblockdriver_msg
.grant
= grant
;
232 m
->m_lbdev_lblockdriver_msg
.flags
= flags
;
/* bdev_vrdwt_cleanup: revoke the grant on the grant vector itself and then
 * every per-element grant in it, in reverse order; 'count' in the message
 * tells how many elements were granted. NOTE(review): fragment — local
 * declarations (i, grant) and the cpf_revoke(grant) call for the vector
 * grant (presumably) are missing. */
237 static void bdev_vrdwt_cleanup(const message
*m
, iovec_s_t
*gvec
)
239 /* Clean up a vectored read/write request.
244 grant
= m
->m_lbdev_lblockdriver_msg
.grant
;
248 for (i
= m
->m_lbdev_lblockdriver_msg
.count
- 1; i
>= 0; i
--)
249 cpf_revoke(gvec
[i
].iov_grant
);
/* bdev_vrdwt: synchronous vectored read/write. Same retry loop as bdev_rdwt
 * but using a local on-stack grant vector (gvec[NR_IOREQS]) and the vectored
 * setup/cleanup helpers. Returns byte count or negative error.
 * NOTE(review): fragment — 'message m' declaration, do-loop opener, the tail
 * of the setup call/early return, and the final return are missing. */
252 static ssize_t
bdev_vrdwt(int req
, dev_t dev
, u64_t pos
, iovec_t
*vec
,
253 int count
, int flags
)
255 /* Perform a synchronous read or write call using a vector of buffers.
257 iovec_s_t gvec
[NR_IOREQS
];
259 int r
, driver_tries
= 0, transfer_tries
= 0;
262 if ((r
= bdev_vrdwt_setup(req
, dev
, pos
, vec
, count
, flags
, &m
,
266 r
= bdev_sendrec(dev
, &m
);
268 bdev_vrdwt_cleanup(&m
, gvec
);
269 } while (bdev_retry(&driver_tries
, &transfer_tries
, &r
));
/* bdev_read: public single-buffer read; thin wrapper forwarding to
 * bdev_rdwt(BDEV_READ, ...). NOTE(review): fragment — braces missing. */
274 ssize_t
bdev_read(dev_t dev
, u64_t pos
, char *buf
, size_t count
, int flags
)
276 /* Perform a synchronous read call into a single buffer.
279 return bdev_rdwt(BDEV_READ
, dev
, pos
, buf
, count
, flags
);
/* bdev_write: public single-buffer write; thin wrapper forwarding to
 * bdev_rdwt(BDEV_WRITE, ...). NOTE(review): fragment — braces missing. */
282 ssize_t
bdev_write(dev_t dev
, u64_t pos
, char *buf
, size_t count
, int flags
)
284 /* Perform a synchronous write call from a single buffer.
287 return bdev_rdwt(BDEV_WRITE
, dev
, pos
, buf
, count
, flags
);
/* bdev_gather: public vectored read; thin wrapper forwarding to
 * bdev_vrdwt(BDEV_GATHER, ...). NOTE(review): fragment — braces missing. */
290 ssize_t
bdev_gather(dev_t dev
, u64_t pos
, iovec_t
*vec
, int count
, int flags
)
292 /* Perform a synchronous read call into a vector of buffers.
295 return bdev_vrdwt(BDEV_GATHER
, dev
, pos
, vec
, count
, flags
);
/* bdev_scatter: public vectored write; thin wrapper forwarding to
 * bdev_vrdwt(BDEV_SCATTER, ...). NOTE(review): fragment — braces missing. */
298 ssize_t
bdev_scatter(dev_t dev
, u64_t pos
, iovec_t
*vec
, int count
, int flags
)
300 /* Perform a synchronous write call from a vector of buffers.
303 return bdev_vrdwt(BDEV_SCATTER
, dev
, pos
, vec
, count
, flags
);
/* bdev_ioctl_setup: prepare a BDEV_IOCTL message. Decodes the transfer size
 * from the ioctl request code (big vs. small encoding), derives grant
 * permissions from the IOR/IOW direction bits, creates a grant on 'buf'
 * (size may be 0, in which case 'buf' need not be valid), and fills in the
 * message including the endpoint on whose behalf the ioctl is performed.
 * NOTE(review): fragment — local declarations (endpt, size, perm, grant),
 * 'perm = 0' initialization (presumably), error returns and OK return are
 * missing. */
306 static int bdev_ioctl_setup(dev_t dev
, unsigned long request
, void *buf
,
307 endpoint_t user_endpt
, message
*m
)
309 /* Set up an I/O control request.
316 if ((endpt
= bdev_driver_get(dev
)) == NONE
)
319 if (_MINIX_IOCTL_BIG(request
))
320 size
= _MINIX_IOCTL_SIZE_BIG(request
);
322 size
= _MINIX_IOCTL_SIZE(request
);
/* IOR: the driver writes result data back; IOW: it reads input data. */
325 if (_MINIX_IOCTL_IOR(request
)) perm
|= CPF_WRITE
;
326 if (_MINIX_IOCTL_IOW(request
)) perm
|= CPF_READ
;
328 /* The size may be 0, in which case 'buf' need not be a valid pointer. */
329 grant
= cpf_grant_direct(endpt
, (vir_bytes
) buf
, size
, perm
);
331 if (!GRANT_VALID(grant
)) {
332 printf("bdev: unable to allocate grant!\n");
336 memset(m
, 0, sizeof(*m
));
337 m
->m_type
= BDEV_IOCTL
;
338 m
->m_lbdev_lblockdriver_msg
.minor
= minor(dev
);
339 m
->m_lbdev_lblockdriver_msg
.request
= request
;
340 m
->m_lbdev_lblockdriver_msg
.grant
= grant
;
341 m
->m_lbdev_lblockdriver_msg
.user
= user_endpt
;
/* bdev_ioctl_cleanup: revoke the grant created by bdev_ioctl_setup.
 * NOTE(review): fragment — braces dropped by extraction. */
346 static void bdev_ioctl_cleanup(const message
*m
)
348 /* Clean up an I/O control request.
351 cpf_revoke(m
->m_lbdev_lblockdriver_msg
.grant
);
/* bdev_ioctl: synchronous ioctl. Same setup/send/cleanup retry loop as the
 * transfer paths, but with NULL transfer_tries since ioctls are not transfer
 * requests (no EIO-based retry). NOTE(review): fragment — 'message m'
 * declaration, do-loop opener, early-error return and final return are
 * missing. */
354 int bdev_ioctl(dev_t dev
, unsigned long request
, void *buf
,
355 endpoint_t user_endpt
)
357 /* Perform a synchronous I/O control request.
360 int r
, driver_tries
= 0;
363 if ((r
= bdev_ioctl_setup(dev
, request
, buf
, user_endpt
, &m
)) != OK
)
366 r
= bdev_sendrec(dev
, &m
);
368 bdev_ioctl_cleanup(&m
);
369 } while (bdev_retry(&driver_tries
, NULL
, &r
));
/* bdev_flush_asyn: drain all outstanding asynchronous calls for the minor
 * device by repeatedly finding one (bdev_call_find) and blocking on its
 * completion (bdev_wait_asyn); the result of each wait is deliberately
 * ignored. NOTE(review): fragment — braces and local 'call' declaration
 * missing. */
374 void bdev_flush_asyn(dev_t dev
)
376 /* Flush all ongoing asynchronous requests to the given minor device. This
377 * involves blocking until all I/O for it has completed.
378 * File system usage note: typically called from flush.
382 while ((call
= bdev_call_find(dev
)) != NULL
)
383 (void) bdev_wait_asyn(call
->id
);
/* bdev_rdwt_asyn: asynchronous single-buffer read/write. Allocates a call
 * structure (1 vector slot), reuses bdev_rdwt_setup to build the message and
 * grant, sends it with bdev_senda tagged by the call id, and on success
 * records the callback, retry counters and the buffer in call->vec[0] so the
 * request can be recreated after a driver restart. Frees the call structure
 * on every failure path. Returns the call id (or negative error —
 * presumably; return statements were dropped by extraction).
 * NOTE(review): fragment — local declarations (call, r), dev/param/callback
 * bookkeeping lines and returns are missing. */
386 static bdev_id_t
bdev_rdwt_asyn(int req
, dev_t dev
, u64_t pos
, char *buf
,
387 size_t count
, int flags
, bdev_callback_t callback
, bdev_param_t param
)
389 /* Perform an asynchronous read or write call using a single buffer.
394 if ((call
= bdev_call_alloc(1)) == NULL
)
397 if ((r
= bdev_rdwt_setup(req
, dev
, pos
, buf
, count
, flags
, &call
->msg
)) !=
399 bdev_call_free(call
);
404 if ((r
= bdev_senda(dev
, &call
->msg
, call
->id
)) != OK
) {
/* Send failed: undo the grant before releasing the call slot. */
405 bdev_rdwt_cleanup(&call
->msg
);
407 bdev_call_free(call
);
413 call
->callback
= callback
;
415 call
->driver_tries
= 0;
416 call
->transfer_tries
= 0;
/* Remember the buffer so bdev_restart_asyn can rebuild the grant. */
417 call
->vec
[0].iov_addr
= (vir_bytes
) buf
;
418 call
->vec
[0].iov_size
= count
;
/* bdev_vrdwt_asyn: asynchronous vectored read/write. Allocates a call
 * structure sized for 'count' vector slots, builds message and grants with
 * bdev_vrdwt_setup into call->gvec, sends asynchronously, and on success
 * records the callback, retry counters and a copy of the caller's iovec so
 * the request can be recreated on driver restart. Frees the call structure
 * on every failure path. NOTE(review): fragment — local declarations,
 * dev/param bookkeeping and return statements are missing. */
423 static bdev_id_t
bdev_vrdwt_asyn(int req
, dev_t dev
, u64_t pos
, iovec_t
*vec
,
424 int count
, int flags
, bdev_callback_t callback
, bdev_param_t param
)
426 /* Perform an asynchronous read or write call using a vector of buffers.
431 if ((call
= bdev_call_alloc(count
)) == NULL
)
434 if ((r
= bdev_vrdwt_setup(req
, dev
, pos
, vec
, count
, flags
, &call
->msg
,
435 call
->gvec
)) != OK
) {
436 bdev_call_free(call
);
441 if ((r
= bdev_senda(dev
, &call
->msg
, call
->id
)) != OK
) {
/* Send failed: revoke all grants before releasing the call slot. */
442 bdev_vrdwt_cleanup(&call
->msg
, call
->gvec
);
444 bdev_call_free(call
);
450 call
->callback
= callback
;
452 call
->driver_tries
= 0;
453 call
->transfer_tries
= 0;
/* Keep a private copy of the iovec for restart-time regranting. */
454 memcpy(call
->vec
, vec
, sizeof(vec
[0]) * count
);
/* bdev_read_asyn: public async single-buffer read; forwards to
 * bdev_rdwt_asyn(BDEV_READ, ...). NOTE(review): fragment — the trailing
 * 'param);' argument and braces were dropped by extraction. */
459 bdev_id_t
bdev_read_asyn(dev_t dev
, u64_t pos
, char *buf
, size_t count
,
460 int flags
, bdev_callback_t callback
, bdev_param_t param
)
462 /* Perform an asynchronous read call into a single buffer.
465 return bdev_rdwt_asyn(BDEV_READ
, dev
, pos
, buf
, count
, flags
, callback
,
/* bdev_write_asyn: public async single-buffer write; forwards to
 * bdev_rdwt_asyn(BDEV_WRITE, ...). NOTE(review): fragment — the trailing
 * 'param);' argument and braces were dropped by extraction. */
469 bdev_id_t
bdev_write_asyn(dev_t dev
, u64_t pos
, char *buf
, size_t count
,
470 int flags
, bdev_callback_t callback
, bdev_param_t param
)
472 /* Perform an asynchronous write call from a single buffer.
475 return bdev_rdwt_asyn(BDEV_WRITE
, dev
, pos
, buf
, count
, flags
, callback
,
/* bdev_gather_asyn: public async vectored read; forwards to
 * bdev_vrdwt_asyn(BDEV_GATHER, ...). NOTE(review): fragment — the trailing
 * 'param);' argument and braces were dropped by extraction. */
479 bdev_id_t
bdev_gather_asyn(dev_t dev
, u64_t pos
, iovec_t
*vec
, int count
,
480 int flags
, bdev_callback_t callback
, bdev_param_t param
)
482 /* Perform an asynchronous read call into a vector of buffers.
485 return bdev_vrdwt_asyn(BDEV_GATHER
, dev
, pos
, vec
, count
, flags
, callback
,
/* bdev_scatter_asyn: public async vectored write; forwards to
 * bdev_vrdwt_asyn(BDEV_SCATTER, ...). NOTE(review): fragment — the trailing
 * 'param);' argument and braces were dropped by extraction. */
489 bdev_id_t
bdev_scatter_asyn(dev_t dev
, u64_t pos
, iovec_t
*vec
, int count
,
490 int flags
, bdev_callback_t callback
, bdev_param_t param
)
492 /* Perform an asynchronous write call into a vector of buffers.
495 return bdev_vrdwt_asyn(BDEV_SCATTER
, dev
, pos
, vec
, count
, flags
, callback
,
/* bdev_ioctl_asyn: asynchronous ioctl. Allocates a call structure (1 vector
 * slot), builds the message with bdev_ioctl_setup, sends asynchronously, and
 * on success records the callback, driver-restart counter and the user
 * buffer in call->vec[0] for restart-time regranting (no transfer_tries —
 * ioctls are not retried on EIO). Frees the call structure on every failure
 * path. NOTE(review): fragment — local declarations, dev/param bookkeeping
 * and return statements are missing. */
499 bdev_id_t
bdev_ioctl_asyn(dev_t dev
, unsigned long request
, void *buf
,
500 endpoint_t user_endpt
, bdev_callback_t callback
, bdev_param_t param
)
502 /* Perform an asynchronous I/O control request.
507 if ((call
= bdev_call_alloc(1)) == NULL
)
510 if ((r
= bdev_ioctl_setup(dev
, request
, buf
, user_endpt
,
511 &call
->msg
)) != OK
) {
512 bdev_call_free(call
);
517 if ((r
= bdev_senda(dev
, &call
->msg
, call
->id
)) != OK
) {
518 bdev_ioctl_cleanup(&call
->msg
);
520 bdev_call_free(call
);
526 call
->callback
= callback
;
528 call
->driver_tries
= 0;
529 call
->vec
[0].iov_addr
= (vir_bytes
) buf
;
/* bdev_callback_asyn: complete an asynchronous request. For transfer
 * requests (first switch on msg.m_type) an EIO result is retried by
 * resending, up to TRANSFER_TRIES; if the resend is queued the callback is
 * deferred (presumably via an early return dropped by extraction).
 * Otherwise, per-type cleanup revokes the grants (second switch), the user
 * callback is invoked with dev, id, param and the final result, and the call
 * structure is freed. NOTE(review): fragment — switch case labels, braces
 * and returns are missing. */
534 void bdev_callback_asyn(bdev_call_t
*call
, int result
)
536 /* Perform the callback for an asynchronous request, with the given result.
537 * Clean up the call structure afterwards.
540 /* If this was a transfer request and the result is EIO, we may want to retry
543 switch (call
->msg
.m_type
) {
548 if (result
== EIO
&& ++call
->transfer_tries
< TRANSFER_TRIES
) {
549 result
= bdev_senda(call
->dev
, &call
->msg
, call
->id
);
/* Per-type cleanup: revoke the grants created at setup time. */
557 switch (call
->msg
.m_type
) {
560 bdev_rdwt_cleanup(&call
->msg
);
566 bdev_vrdwt_cleanup(&call
->msg
, call
->gvec
);
571 bdev_ioctl_cleanup(&call
->msg
);
579 /* Call the callback function. */
580 /* FIXME: we assume all reasonable ssize_t values can be stored in an int. */
581 call
->callback(call
->dev
, call
->id
, call
->param
, result
);
583 /* Free up the call structure. */
584 bdev_call_free(call
);
/* bdev_restart_asyn: re-issue a pending asynchronous call after its driver
 * restarted (and may have a new endpoint). Enforces the DRIVER_TRIES limit,
 * then — dispatching on the saved message type — revokes the old grants and
 * rebuilds the request from the parameters preserved in the call structure
 * (call->vec for buffers, saved message fields for pos/count/flags/request/
 * user), and finally resends with bdev_senda under the same call id.
 * NOTE(review): fragment — the switch statement skeleton, error returns
 * (presumably EDEADSRCDST on exceeding DRIVER_TRIES), the tail of the ioctl
 * setup call, and setup-failure handling are missing. */
587 int bdev_restart_asyn(bdev_call_t
*call
)
589 /* The driver for the given call has restarted, and may now have a new
590 * endpoint. Recreate and resend the request for the given call.
594 /* Update and check the retry limit for driver restarts first. */
595 if (++call
->driver_tries
>= DRIVER_TRIES
)
598 /* Recreate all grants for the new endpoint. */
599 type
= call
->msg
.m_type
;
/* Single-buffer transfer: revoke old grant, regrant from call->vec[0]. */
604 bdev_rdwt_cleanup(&call
->msg
);
606 r
= bdev_rdwt_setup(type
, call
->dev
,
607 call
->msg
.m_lbdev_lblockdriver_msg
.pos
,
608 (char *) call
->vec
[0].iov_addr
, call
->msg
.m_lbdev_lblockdriver_msg
.count
,
609 call
->msg
.m_lbdev_lblockdriver_msg
.flags
, &call
->msg
);
/* Vectored transfer: revoke all old grants, regrant from call->vec. */
615 bdev_vrdwt_cleanup(&call
->msg
, call
->gvec
);
617 r
= bdev_vrdwt_setup(type
, call
->dev
,
618 call
->msg
.m_lbdev_lblockdriver_msg
.pos
,
619 call
->vec
, call
->msg
.m_lbdev_lblockdriver_msg
.count
, call
->msg
.m_lbdev_lblockdriver_msg
.flags
,
620 &call
->msg
, call
->gvec
);
/* Ioctl: revoke old grant, rebuild from the saved request and user. */
625 bdev_ioctl_cleanup(&call
->msg
);
627 r
= bdev_ioctl_setup(call
->dev
, call
->msg
.m_lbdev_lblockdriver_msg
.request
,
628 (char *) call
->vec
[0].iov_addr
, call
->msg
.m_lbdev_lblockdriver_msg
.user
,
640 /* Try to resend the request. */
641 return bdev_senda(call
->dev
, &call
->msg
, call
->id
);