/* This file contains the device independent device driver interface.
 *
 * Changes:
 *   Jul 25, 2005   added SYS_SIG type for signals  (Jorrit N. Herder)
 *   Sep 15, 2004   added SYN_ALARM type for timeouts  (Jorrit N. Herder)
 *   Jul 23, 2004   removed kernel dependencies  (Jorrit N. Herder)
 *   Apr 02, 1992   constructed from AT wini and floppy driver  (Kees J. Bot)
 *
 * The drivers support the following operations (using message format m2):
 *
 *    m_type          DEVICE   IO_ENDPT   COUNT    POSITION HIGHPOS  IO_GRANT
 * ----------------------------------------------------------------------------
 * | DEV_OPEN      | device | proc nr |         |         |        |           |
 * |---------------+--------+---------+---------+---------+--------+-----------|
 * | DEV_CLOSE     | device | proc nr |         |         |        |           |
 * |---------------+--------+---------+---------+---------+--------+-----------|
 * | DEV_READ_S    | device | proc nr | bytes   | off lo  | off hi | buf grant |
 * |---------------+--------+---------+---------+---------+--------+-----------|
 * | DEV_WRITE_S   | device | proc nr | bytes   | off lo  | off hi | buf grant |
 * |---------------+--------+---------+---------+---------+--------+-----------|
 * | DEV_GATHER_S  | device | proc nr | iov len | off lo  | off hi | iov grant |
 * |---------------+--------+---------+---------+---------+--------+-----------|
 * | DEV_SCATTER_S | device | proc nr | iov len | off lo  | off hi | iov grant |
 * |---------------+--------+---------+---------+---------+--------+-----------|
 * | DEV_IOCTL_S   | device | proc nr | request |         |        | buf grant |
 * |---------------+--------+---------+---------+---------+--------+-----------|
 * | CANCEL        | device | proc nr | r/w     |         |        |           |
 * ----------------------------------------------------------------------------
 *
 * The file contains the following entry points:
 *
 *   driver_announce:	called by a device driver to announce it is up
 *   driver_receive:	receive() interface for drivers
 *   driver_receive_mq:	receive() interface for drivers with message queueing
 *   driver_task:	called by the device dependent task entry
 *   driver_init_buffer: initialize a DMA buffer
 *   driver_mq_queue:	queue an incoming message for later processing
 */
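
/* A minimal usage sketch (illustrative only, not part of this library): a
 * device dependent driver fills a struct driver with its dr_* entry points
 * and hands it to driver_task() together with a driver type.  The my_driver
 * and main() names below are assumptions for illustration; the dr_* hooks
 * and the DRIVER_STD/DRIVER_ASYN types are the ones dispatched in this file.
 *
 *	static struct driver my_driver;	   -- dr_open, dr_close, dr_ioctl,
 *					   -- dr_prepare, dr_transfer, ... set
 *					   -- to the driver's own handlers or
 *					   -- to the nop_* defaults below
 *	int main(void)
 *	{
 *		driver_init_buffer();			-- set up the DMA buffer
 *		driver_announce();			-- announce we are up
 *		driver_task(&my_driver, DRIVER_STD);	-- serve requests forever
 *		return OK;
 *	}
 */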

#include <minix/drivers.h>
#include <sys/ioc_disk.h>
#include <minix/endpoint.h>
#include <minix/driver.h>

/* Claim space for variables. */
u8_t *tmp_buf = NULL;		/* the DMA buffer eventually */
phys_bytes tmp_phys;		/* phys address of DMA buffer */

FORWARD _PROTOTYPE( void clear_open_devs, (void) );
FORWARD _PROTOTYPE( int is_open_dev, (int device) );
FORWARD _PROTOTYPE( void set_open_dev, (int device) );

FORWARD _PROTOTYPE( void asyn_reply, (message *mess, int proc_nr, int r) );
FORWARD _PROTOTYPE( int driver_reply, (endpoint_t caller_e, int caller_status,
	message *m_ptr) );
FORWARD _PROTOTYPE( int driver_spurious_reply, (endpoint_t caller_e,
	int caller_status, message *m_ptr) );
FORWARD _PROTOTYPE( int do_rdwt, (struct driver *dr, message *mp) );
FORWARD _PROTOTYPE( int do_vrdwt, (struct driver *dr, message *mp) );

endpoint_t device_caller;	/* endpoint of the current caller */
PRIVATE mq_t *queue_head = NULL;
PRIVATE int open_devs[MAX_NR_OPEN_DEVICES];
PRIVATE int next_open_devs_slot = 0;

/*===========================================================================*
 *				clear_open_devs				     *
 *===========================================================================*/
PRIVATE void clear_open_devs()
{
  next_open_devs_slot = 0;
}

/*===========================================================================*
 *				is_open_dev				     *
 *===========================================================================*/
PRIVATE int is_open_dev(int device)
{
  int i, open_dev_found;

  open_dev_found = FALSE;
  for(i = 0; i < next_open_devs_slot; i++) {
	if(open_devs[i] == device) {
		open_dev_found = TRUE;
		break;
	}
  }

  return open_dev_found;
}

/*===========================================================================*
 *				set_open_dev				     *
 *===========================================================================*/
PRIVATE void set_open_dev(int device)
{
  if(next_open_devs_slot >= MAX_NR_OPEN_DEVICES) {
	panic("out of slots for open devices");
  }
  open_devs[next_open_devs_slot] = device;
  next_open_devs_slot++;
}

/*===========================================================================*
 *				asyn_reply				     *
 *===========================================================================*/
PRIVATE void asyn_reply(mess, proc_nr, r)
message *mess;
int proc_nr;
int r;
{
/* Send a reply using the new asynchronous character device protocol. */
  message reply_mess;

  switch (mess->m_type) {
  case DEV_OPEN:
	reply_mess.m_type = DEV_REVIVE;
	reply_mess.REP_ENDPT = proc_nr;
	reply_mess.REP_STATUS = r;
	break;

  case DEV_CLOSE:
	reply_mess.m_type = DEV_CLOSE_REPL;
	reply_mess.REP_ENDPT = proc_nr;
	reply_mess.REP_STATUS = r;
	break;

  case DEV_READ_S:
  case DEV_WRITE_S:
	if (r == SUSPEND)
		printf("driver_task: reviving %d with SUSPEND\n", proc_nr);

	reply_mess.m_type = DEV_REVIVE;
	reply_mess.REP_ENDPT = proc_nr;
	reply_mess.REP_IO_GRANT = (cp_grant_id_t) mess->IO_GRANT;
	reply_mess.REP_STATUS = r;
	break;

  case CANCEL:
	/* The original request should send a reply. */
	return;

  case DEV_SELECT:
	reply_mess.m_type = DEV_SEL_REPL1;
	reply_mess.DEV_MINOR = mess->DEVICE;
	reply_mess.DEV_SEL_OPS = r;
	break;

  default:
	reply_mess.m_type = TASK_REPLY;
	reply_mess.REP_ENDPT = proc_nr;
	/* Status is # of bytes transferred or error code. */
	reply_mess.REP_STATUS = r;
	break;
  }

  r = asynsend(device_caller, &reply_mess);
  if (r != OK)
	printf("driver_task: unable to asynsend to %d: %d\n",
		device_caller, r);
}

/*===========================================================================*
 *				driver_reply				     *
 *===========================================================================*/
PRIVATE int driver_reply(caller_e, caller_status, m_ptr)
endpoint_t caller_e;
int caller_status;
message *m_ptr;
{
/* Reply to a message sent to the driver. */
  int r;

  /* Use sendnb if caller is guaranteed to be blocked, asynsend otherwise. */
  if(IPC_STATUS_CALL(caller_status) == SENDREC) {
	r = sendnb(caller_e, m_ptr);
  }
  else {
	r = asynsend(caller_e, m_ptr);
  }

  return r;
}

/*===========================================================================*
 *			driver_spurious_reply				     *
 *===========================================================================*/
PRIVATE int driver_spurious_reply(caller_e, caller_status, m_ptr)
endpoint_t caller_e;
int caller_status;
message *m_ptr;
{
/* Reply to a spurious message, pretending to be dead. */
  int r;

  m_ptr->m_type = TASK_REPLY;
  m_ptr->REP_ENDPT = m_ptr->IO_ENDPT;
  m_ptr->REP_STATUS = ERESTART;

  r = driver_reply(caller_e, caller_status, m_ptr);
  if(r != OK) {
	printf("unable to reply to spurious message from %d\n",
		caller_e);
	return r;
  }

  return OK;
}

/*===========================================================================*
 *				driver_announce				     *
 *===========================================================================*/
PUBLIC void driver_announce()
{
/* Announce we are up after a fresh start or restart. */
  int r;
  char key[DS_MAX_KEYLEN];
  char label[DS_MAX_KEYLEN];
  char *driver_prefix = "drv.vfs.";

  /* Callers are allowed to use sendrec to communicate with drivers.
   * For this reason, there may be blocked callers when a driver restarts.
   * Ask the kernel to unblock them (if any).
   */
  r = sys_statectl(SYS_STATE_CLEAR_IPC_REFS);
  if (r != OK) {
	panic("driver_announce: sys_statectl failed: %d\n", r);
  }

  /* Publish a driver up event. */
  r = ds_retrieve_label_name(label, getprocnr());
  if (r != OK) {
	panic("driver_announce: unable to get own label: %d\n", r);
  }
  snprintf(key, DS_MAX_KEYLEN, "%s%s", driver_prefix, label);
  r = ds_publish_u32(key, DS_DRIVER_UP, DSF_OVERWRITE);
  if (r != OK) {
	panic("driver_announce: unable to publish driver up event: %d\n", r);
  }

  /* Expect a DEV_OPEN for any device before serving regular driver requests. */
  clear_open_devs();
}
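
/* Illustrative sketch (not part of this library): after driver_announce(),
 * another service could look up the published key to see that the driver is
 * up.  The "mydriver" label and the local variable name are assumptions, and
 * the ds_retrieve_u32() lookup is assumed to be the matching libsys call.
 *
 *	u32_t val;
 *	if (ds_retrieve_u32("drv.vfs.mydriver", &val) == OK &&
 *			val == DS_DRIVER_UP) {
 *		-- the driver announced itself
 *	}
 */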

/*===========================================================================*
 *				driver_receive				     *
 *===========================================================================*/
PUBLIC int driver_receive(src, m_ptr, status_ptr)
endpoint_t src;
message *m_ptr;
int *status_ptr;
{
/* receive() interface for drivers. */
  int r;
  int ipc_status;

  while (TRUE) {
	/* Wait for a request. */
	r = sef_receive_status(src, m_ptr, &ipc_status);
	*status_ptr = ipc_status;
	if (r != OK) {
		return r;
	}

	/* See if only DEV_OPEN is to be expected for this device. */
	if(IS_DEV_MINOR_RQ(m_ptr->m_type) && !is_open_dev(m_ptr->DEVICE)) {
		if(m_ptr->m_type != DEV_OPEN) {
			if(!is_ipc_asynch(ipc_status)) {
				driver_spurious_reply(m_ptr->m_source,
					ipc_status, m_ptr);
			}
			continue;
		}
		set_open_dev(m_ptr->DEVICE);
	}

	break;
  }

  return OK;
}

/*===========================================================================*
 *				driver_receive_mq			     *
 *===========================================================================*/
PUBLIC int driver_receive_mq(m_ptr, status_ptr)
message *m_ptr;
int *status_ptr;
{
/* receive() interface for drivers with message queueing. */
  int ipc_status;
  mq_t *mq;

  /* Any queued messages? Oldest are at the head. */
  while((mq = queue_head) != NULL) {
	memcpy(m_ptr, &mq->mq_mess, sizeof(mq->mq_mess));
	ipc_status = mq->mq_mess_status;
	*status_ptr = ipc_status;
	queue_head = queue_head->mq_next;
	mq_free(mq);

	/* See if only DEV_OPEN is to be expected for this device. */
	if(IS_DEV_MINOR_RQ(m_ptr->m_type) && !is_open_dev(m_ptr->DEVICE)) {
		if(m_ptr->m_type != DEV_OPEN) {
			if(!is_ipc_asynch(ipc_status)) {
				driver_spurious_reply(m_ptr->m_source,
					ipc_status, m_ptr);
			}
			continue;
		}
		set_open_dev(m_ptr->DEVICE);
	}

	return OK;
  }

  /* Fall back to standard receive() interface for drivers. */
  return driver_receive(ANY, m_ptr, status_ptr);
}

/*===========================================================================*
 *				driver_task				     *
 *===========================================================================*/
PUBLIC void driver_task(dp, type)
struct driver *dp;	/* Device dependent entry points. */
int type;		/* Driver type (DRIVER_STD or DRIVER_ASYN) */
{
/* Main program of any device driver task. */
  int r, proc_nr, ipc_status;
  message mess;

  /* Here is the main loop of the disk task.  It waits for a message, carries
   * it out, and sends a reply.
   */
  while (TRUE) {
	if ((r = driver_receive_mq(&mess, &ipc_status)) != OK)
		panic("driver_receive_mq failed: %d", r);

	device_caller = mess.m_source;
	proc_nr = mess.IO_ENDPT;

	/* Now carry out the work. */
	if (is_ipc_notify(ipc_status)) {
		switch (_ENDPOINT_P(mess.m_source)) {
		case HARDWARE:
			/* leftover interrupt or expired timer. */
			if(dp->dr_hw_int) {
				(*dp->dr_hw_int)(dp, &mess);
			}
			break;
		case CLOCK:
			(*dp->dr_alarm)(dp, &mess);
			break;
		default:
			if(dp->dr_other)
				r = (*dp->dr_other)(dp, &mess);
			else
				r = EINVAL;
			goto send_reply;
		}

		/* done, get a new message */
		continue;
	}

	switch(mess.m_type) {
	case DEV_OPEN:		r = (*dp->dr_open)(dp, &mess);	break;
	case DEV_CLOSE:		r = (*dp->dr_close)(dp, &mess);	break;
	case DEV_IOCTL_S:	r = (*dp->dr_ioctl)(dp, &mess);	break;
	case CANCEL:		r = (*dp->dr_cancel)(dp, &mess);break;
	case DEV_SELECT:	r = (*dp->dr_select)(dp, &mess);break;

	case DEV_READ_S:
	case DEV_WRITE_S:	r = do_rdwt(dp, &mess);		break;
	case DEV_GATHER_S:
	case DEV_SCATTER_S:	r = do_vrdwt(dp, &mess);	break;

	default:
		if(dp->dr_other)
			r = (*dp->dr_other)(dp, &mess);
		else
			r = EINVAL;
		break;
	}

send_reply:
	/* Clean up leftover state. */
	(*dp->dr_cleanup)();

	/* Finally, prepare and send the reply message. */
	if (r == EDONTREPLY)
		continue;	/* no reply needed */

	switch (type) {
	case DRIVER_STD:
		mess.m_type = TASK_REPLY;
		mess.REP_ENDPT = proc_nr;
		/* Status is # of bytes transferred or error code. */
		mess.REP_STATUS = r;

		r = driver_reply(device_caller, ipc_status, &mess);
		if (r != OK) {
			printf("driver_task: unable to send reply to %d: %d\n",
				device_caller, r);
		}
		break;

	case DRIVER_ASYN:
		asyn_reply(&mess, proc_nr, r);
		break;

	default:
		panic("unknown driver type: %d", type);
	}
  }
}

/*===========================================================================*
 *				driver_init_buffer			     *
 *===========================================================================*/
PUBLIC void driver_init_buffer(void)
{
/* Select a buffer that can safely be used for DMA transfers.  It may also
 * be used to read partition tables and such.  Its absolute address is
 * 'tmp_phys', the normal address is 'tmp_buf'.
 */

  if(!(tmp_buf = alloc_contig(2*DMA_BUF_SIZE, AC_ALIGN4K, &tmp_phys)))
	panic("can't allocate tmp_buf: %d", DMA_BUF_SIZE);
}

/*===========================================================================*
 *				do_rdwt					     *
 *===========================================================================*/
PRIVATE int do_rdwt(dp, mp)
struct driver *dp;		/* device dependent entry points */
message *mp;			/* pointer to read or write message */
{
/* Carry out a single read or write request. */
  iovec_t iovec1;
  int r, opcode;
  u64_t position;

  /* Disk address?  Address and length of the user buffer? */
  if (mp->COUNT < 0) return(EINVAL);

  /* Prepare for I/O. */
  if ((*dp->dr_prepare)(mp->DEVICE) == NIL_DEV) return(ENXIO);

  /* Create a one element scatter/gather vector for the buffer. */
  if(mp->m_type == DEV_READ_S) opcode = DEV_GATHER_S;
  else opcode = DEV_SCATTER_S;

  iovec1.iov_addr = (vir_bytes) mp->IO_GRANT;
  iovec1.iov_size = mp->COUNT;

  /* Transfer bytes from/to the device. */
  position = make64(mp->POSITION, mp->HIGHPOS);
  r = (*dp->dr_transfer)(mp->IO_ENDPT, opcode, position, &iovec1, 1);

  /* Return the number of bytes transferred or an error code. */
  return(r == OK ? (mp->COUNT - iovec1.iov_size) : r);
}

/*==========================================================================*
 *				do_vrdwt				    *
 *==========================================================================*/
PRIVATE int do_vrdwt(dp, mp)
struct driver *dp;		/* device dependent entry points */
message *mp;			/* pointer to read or write message */
{
/* Carry out a device read or write to/from a vector of user addresses.
 * The "user addresses" are assumed to be safe, i.e. FS transferring to/from
 * its own buffers, so they are not checked.
 */
  static iovec_t iovec[NR_IOREQS];
  phys_bytes iovec_size;
  unsigned nr_req;
  int r, opcode;
  u64_t position;

  nr_req = mp->COUNT;	/* Length of I/O vector */

  /* Copy the vector from the caller to kernel space. */
  if (nr_req > NR_IOREQS) nr_req = NR_IOREQS;
  iovec_size = (phys_bytes) (nr_req * sizeof(iovec[0]));

  if (OK != sys_safecopyfrom(mp->m_source, (vir_bytes) mp->IO_GRANT,
		0, (vir_bytes) iovec, iovec_size, D)) {
	panic("bad I/O vector by: %d", mp->m_source);
  }

  /* Prepare for I/O. */
  if ((*dp->dr_prepare)(mp->DEVICE) == NIL_DEV) return(ENXIO);

  /* Transfer bytes from/to the device. */
  opcode = mp->m_type;
  position = make64(mp->POSITION, mp->HIGHPOS);
  r = (*dp->dr_transfer)(mp->IO_ENDPT, opcode, position, iovec, nr_req);

  /* Copy the I/O vector back to the caller. */
  if (OK != sys_safecopyto(mp->m_source, (vir_bytes) mp->IO_GRANT,
		0, (vir_bytes) iovec, iovec_size, D)) {
	panic("couldn't return I/O vector: %d", mp->m_source);
  }

  return(r);
}

/*===========================================================================*
 *				no_name					     *
 *===========================================================================*/
PUBLIC char *no_name()
{
/* Use this default name if there is no specific name for the device. This was
 * originally done by fetching the name from the task table for this process:
 * "return(tasktab[proc_number(proc_ptr) + NR_TASKS].name);", but currently a
 * real "noname" is returned. Perhaps, some system information service can be
 * queried for a name at a later time.
 */
  static char name[] = "noname";
  return name;
}

/*============================================================================*
 *				do_nop					      *
 *============================================================================*/
PUBLIC int do_nop(dp, mp)
struct driver *dp;
message *mp;
{
/* Nothing there, or nothing to do. */

  switch (mp->m_type) {
  case DEV_OPEN:	return(ENODEV);
  case DEV_CLOSE:	return(OK);
  case DEV_IOCTL_S:	return(ENOTTY);
  default:		printf("nop: ignoring code %d\n", mp->m_type);
			return(EIO);
  }
}

/*============================================================================*
 *				nop_ioctl				      *
 *============================================================================*/
PUBLIC int nop_ioctl(dp, mp)
struct driver *dp;
message *mp;
{
  return(ENOTTY);
}

/*============================================================================*
 *				nop_alarm				      *
 *============================================================================*/
PUBLIC void nop_alarm(dp, mp)
struct driver *dp;
message *mp;
{
/* Ignore the leftover alarm. */
}

/*===========================================================================*
 *				nop_prepare				     *
 *===========================================================================*/
PUBLIC struct device *nop_prepare(int device)
{
/* Nothing to prepare for. */
  return(NIL_DEV);
}

/*===========================================================================*
 *				nop_cleanup				     *
 *===========================================================================*/
PUBLIC void nop_cleanup()
{
/* Nothing to clean up. */
}

/*===========================================================================*
 *				nop_cancel				     *
 *===========================================================================*/
PUBLIC int nop_cancel(struct driver *dr, message *m)
{
/* Nothing to do for cancel. */
  return(OK);
}

/*===========================================================================*
 *				nop_select				     *
 *===========================================================================*/
PUBLIC int nop_select(struct driver *dr, message *m)
{
/* Nothing to do for select. */
  return(OK);
}

/*============================================================================*
 *				do_diocntl				      *
 *============================================================================*/
PUBLIC int do_diocntl(dp, mp)
struct driver *dp;
message *mp;			/* pointer to ioctl request */
{
/* Carry out a partition setting/getting request. */
  struct device *dv;
  struct partition entry;
  int s;

  if (mp->REQUEST != DIOCSETP && mp->REQUEST != DIOCGETP) {
	if(dp->dr_other) {
		return dp->dr_other(dp, mp);
	} else return(ENOTTY);
  }

  /* Decode the message parameters. */
  if ((dv = (*dp->dr_prepare)(mp->DEVICE)) == NIL_DEV) return(ENXIO);

  if (mp->REQUEST == DIOCSETP) {
	/* Copy just this one partition table entry. */
	s = sys_safecopyfrom(mp->IO_ENDPT, (vir_bytes) mp->IO_GRANT,
		0, (vir_bytes) &entry, sizeof(entry), D);
	if(s != OK)
		return s;
	dv->dv_base = entry.base;
	dv->dv_size = entry.size;
  } else {
	/* Return a partition table entry and the geometry of the drive. */
	entry.base = dv->dv_base;
	entry.size = dv->dv_size;
	(*dp->dr_geometry)(&entry);
	s = sys_safecopyto(mp->IO_ENDPT, (vir_bytes) mp->IO_GRANT,
		0, (vir_bytes) &entry, sizeof(entry), D);
	if (s != OK)
		return s;
  }
  return(OK);
}
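
/* Illustrative sketch (not part of this library): a user space program could
 * exercise the DIOCGETP/DIOCSETP handling above through the regular ioctl()
 * call.  The device path and the fd/entry names are assumptions; only the
 * 'base' and 'size' fields touched by do_diocntl() are shown.
 *
 *	struct partition entry;
 *	int fd = open("/dev/c0d0", O_RDONLY);	-- hypothetical device path
 *	if (fd >= 0 && ioctl(fd, DIOCGETP, &entry) == 0) {
 *		-- entry.base and entry.size now describe the partition
 *	}
 */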

/*===========================================================================*
 *				driver_mq_queue				     *
 *===========================================================================*/
PUBLIC int driver_mq_queue(message *m, int status)
{
	mq_t *mq, *mi;
	static int mq_initialized = FALSE;

	if(!mq_initialized) {
		/* Init MQ library. */
		mq_init();
		mq_initialized = TRUE;
	}

	if(!(mq = mq_get()))
		panic("driver_mq_queue: mq_get failed");
	memcpy(&mq->mq_mess, m, sizeof(mq->mq_mess));
	mq->mq_mess_status = status;
	mq->mq_next = NULL;

	if(!queue_head) {
		queue_head = mq;
	} else {
		for(mi = queue_head; mi->mq_next; mi = mi->mq_next)
			;
		mi->mq_next = mq;
	}

	return TRUE;
}
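
/* Illustrative sketch (not part of this library): a driver that picks up a
 * request it cannot serve immediately (for instance while it is busy inside
 * one of its own receive loops) might stash the message for the main loop,
 * which will see it again on the next driver_receive_mq() call.  The names
 * m and ipc_status below are assumptions for illustration.
 *
 *	-- inside some driver-specific code that just received 'm':
 *	driver_mq_queue(&m, ipc_status);	-- handle it later, in order
 */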