[minix.git] / drivers / vbox / hgcm.c
/* VirtualBox driver - by D.C. van Moolenbroek */
#include <minix/drivers.h>
#include <minix/vboxtype.h>
#include <minix/vboxif.h>
#include <assert.h>

#include "vmmdev.h"
#include "proto.h"

#define MAX_CONNS	4	/* maximum number of HGCM connections */
#define MAX_REQS	2	/* number of concurrent requests per conn. */
#define MAX_PARAMS	8	/* maximum number of parameters per request */
/* HGCM connection states. */
enum {
	STATE_FREE,
	STATE_OPENING,
	STATE_OPEN,
	STATE_CLOSING
};

/* HGCM connection information. */
static struct {
	int state;				/* connection state */
	endpoint_t endpt;			/* caller endpoint */
	u32_t client_id;			/* VMMDev-given client ID */
	struct {
		int busy;			/* is this request ongoing? */
		struct VMMDevHGCMHeader *ptr;	/* request buffer */
		phys_bytes addr;		/* buffer's physical address */

		int status;			/* IPC status of request */
		long id;			/* request ID */

		cp_grant_id_t grant;		/* grant for parameters */
		int count;			/* number of parameters */
		vbox_param_t param[MAX_PARAMS];	/* local copy of parameters */
	} req[MAX_REQS];			/* concurrent requests */
} hgcm_conn[MAX_CONNS];
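/* A request slot's buffer is allocated lazily by alloc_req() and freed only
 * by free_conn(); the busy flag alone tracks whether a slot is in use.
 */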
/*===========================================================================*
 *				convert_result				     *
 *===========================================================================*/
static int convert_result(int res)
{
	/* Convert a VirtualBox result code to a POSIX error code. */

	/* HGCM transport error codes. */
	switch (res) {
	case VMMDEV_ERR_HGCM_NOT_FOUND:		return ESRCH;
	case VMMDEV_ERR_HGCM_DENIED:		return EPERM;
	case VMMDEV_ERR_HGCM_INVALID_ADDR:	return EFAULT;
	case VMMDEV_ERR_HGCM_ASYNC_EXEC:	return EDONTREPLY;
	case VMMDEV_ERR_HGCM_INTERNAL:		return EGENERIC;
	case VMMDEV_ERR_HGCM_INVALID_ID:	return EINVAL;
	}

	/* Positive codes are success codes. */
	if (res >= 0)
		return OK;

	/* Unsupported negative codes are translated to EGENERIC; it is up to
	 * the caller to check the actual VirtualBox result code in that case.
	 */
	return convert_err(res);
}
/*===========================================================================*
 *				send_reply				     *
 *===========================================================================*/
static void send_reply(endpoint_t endpt, int ipc_status, int result, int code,
	long id)
{
	/* Send a reply to an earlier request. */
	message m;
	int r;

	memset(&m, 0, sizeof(m));
	m.m_type = VBOX_REPLY;
	m.VBOX_RESULT = result;
	m.VBOX_CODE = code;
	m.VBOX_ID = id;
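	/* A SENDREC caller is already blocked waiting for the reply, so a
	 * nonblocking send suffices; for other callers, send the reply
	 * asynchronously so that this driver itself can never block.
	 */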
	if (IPC_STATUS_CALL(ipc_status) == SENDREC)
		r = sendnb(endpt, &m);
	else
		r = asynsend3(endpt, &m, AMF_NOREPLY);

	if (r != OK)
		printf("VBOX: unable to send reply to %d: %d\n", endpt, r);
}
/*===========================================================================*
 *				alloc_req				     *
 *===========================================================================*/
static int alloc_req(int conn)
{
	/* Allocate a request for the given connection. Allocate memory as
	 * necessary. Do not mark the request as busy, as it may end up not
	 * being used.
	 */
	phys_bytes addr;
	void *ptr;
	int req;

	for (req = 0; req < MAX_REQS; req++)
		if (!hgcm_conn[conn].req[req].busy)
			break;
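	/* System services see negative error codes (errno.h with _SYSTEM),
	 * which is what lets callers tell the errors below apart from the
	 * nonnegative request index returned on success.
	 */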
	if (req == MAX_REQS)
		return EMFILE;

	if (hgcm_conn[conn].req[req].ptr == NULL) {
		if ((ptr = alloc_contig(VMMDEV_BUF_SIZE, 0, &addr)) == NULL)
			return ENOMEM;

		hgcm_conn[conn].req[req].ptr = (struct VMMDevHGCMHeader *) ptr;
		hgcm_conn[conn].req[req].addr = addr;
	}

	return req;
}
/*===========================================================================*
 *				free_conn				     *
 *===========================================================================*/
static void free_conn(int conn)
{
	/* Free the memory for all requests of the given connection, and mark
	 * the connection as free.
	 */
	void *ptr;
	int req;

	for (req = 0; req < MAX_REQS; req++) {
		if ((ptr = (void *) hgcm_conn[conn].req[req].ptr) != NULL) {
			assert(!hgcm_conn[conn].req[req].busy);

			free_contig(ptr, VMMDEV_BUF_SIZE);

			hgcm_conn[conn].req[req].ptr = NULL;
		}
	}

	hgcm_conn[conn].state = STATE_FREE;
}
/*===========================================================================*
 *				start_req				     *
 *===========================================================================*/
static int start_req(int conn, int req, int type, size_t size, int ipc_status,
	long id, int *code)
{
	/* Start a request. */
	int r, res;

	hgcm_conn[conn].req[req].ptr->flags = 0;
	hgcm_conn[conn].req[req].ptr->result = VMMDEV_ERR_GENERIC;

	*code = res = vbox_request(&hgcm_conn[conn].req[req].ptr->header,
		hgcm_conn[conn].req[req].addr, type, size);

	r = convert_result(res);

	if (r != OK && r != EDONTREPLY)
		return r;

	/* The request may be processed either immediately or asynchronously.
	 * The caller of this function must be able to cope with both
	 * situations. In either case, mark the current request as ongoing.
	 */
	hgcm_conn[conn].req[req].busy = TRUE;
	hgcm_conn[conn].req[req].status = ipc_status;
	hgcm_conn[conn].req[req].id = id;

	return r;
}
/*===========================================================================*
 *				cancel_req				     *
 *===========================================================================*/
static void cancel_req(int conn, int req)
{
	/* Cancel an ongoing request. */

	assert(hgcm_conn[conn].req[req].ptr != NULL);

	/* The cancel request consists only of the HGCM header. The physical
	 * location determines the request to cancel. Note that request
	 * cancellation is full of race conditions, so we simply ignore the
	 * return value and assume all went well.
	 */
	hgcm_conn[conn].req[req].ptr->flags = 0;
	hgcm_conn[conn].req[req].ptr->result = VMMDEV_ERR_GENERIC;

	vbox_request(&hgcm_conn[conn].req[req].ptr->header,
		hgcm_conn[conn].req[req].addr, VMMDEV_REQ_HGCMCANCEL,
		sizeof(struct VMMDevHGCMCancel));

	hgcm_conn[conn].req[req].busy = FALSE;
}
/*===========================================================================*
 *				finish_req				     *
 *===========================================================================*/
static int finish_req(int conn, int req, int *code)
{
	/* The given request has finished. Take the appropriate action. */
	struct VMMDevHGCMConnect *connreq;
	struct VMMDevHGCMCall *callreq;
	struct VMMDevHGCMParam *inp;
	vbox_param_t *outp;
	int i, count, res, r = OK;

	hgcm_conn[conn].req[req].busy = FALSE;

	*code = res = hgcm_conn[conn].req[req].ptr->result;

	r = convert_result(res);

	/* The request has finished, so it cannot still be in progress. */
	if (r == EDONTREPLY)
		r = EGENERIC;

	switch (hgcm_conn[conn].state) {
	case STATE_FREE:
		assert(0);

		break;

	case STATE_OPENING:
		if (r == OK) {
			connreq = (struct VMMDevHGCMConnect *)
				hgcm_conn[conn].req[req].ptr;
			hgcm_conn[conn].client_id = connreq->client_id;
			hgcm_conn[conn].state = STATE_OPEN;

			r = conn;
		} else {
			free_conn(conn);
		}

		break;

	case STATE_CLOSING:
		/* Neither we nor the caller can do anything with failures. */
		if (r != OK)
			printf("VBOX: disconnection failure #2 (%d)\n", res);

		free_conn(conn);

		r = OK;

		break;

	case STATE_OPEN:
		/* On success, extract and copy back parameters to the caller.
		 */
		if (r == OK) {
			callreq = (struct VMMDevHGCMCall *)
				hgcm_conn[conn].req[req].ptr;
			inp = (struct VMMDevHGCMParam *) &callreq[1];
			outp = &hgcm_conn[conn].req[req].param[0];
			count = hgcm_conn[conn].req[req].count;

			for (i = 0; i < count; i++) {
				switch (outp->type) {
				case VBOX_TYPE_U32:
					outp->u32 = inp->u32;
					break;

				case VBOX_TYPE_U64:
					outp->u64 = inp->u64;
					break;
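				/* VBOX_TYPE_PTR parameters need no copy-back
				 * here: the host writes into the caller's
				 * pages directly through the page list.
				 */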
				default:
					break;
				}

				inp++;
				outp++;
			}

			if (count > 0) {
				r = sys_safecopyto(hgcm_conn[conn].endpt,
					hgcm_conn[conn].req[req].grant, 0,
					(vir_bytes)
					hgcm_conn[conn].req[req].param,
					count * sizeof(vbox_param_t));
			}
		}

		break;
	}

	return r;
}
/*===========================================================================*
 *				check_conn				     *
 *===========================================================================*/
static void check_conn(int conn)
{
	/* Check all requests for the given connection for completion. */
	int r, req, code;

	for (req = 0; req < MAX_REQS; req++) {
		if (!hgcm_conn[conn].req[req].busy) continue;

		if (!(hgcm_conn[conn].req[req].ptr->flags &
				VMMDEV_HGCM_REQ_DONE))
			continue;

		r = finish_req(conn, req, &code);

		assert(r != EDONTREPLY);

		send_reply(hgcm_conn[conn].endpt,
			hgcm_conn[conn].req[req].status, r, code,
			hgcm_conn[conn].req[req].id);
	}
}
/*===========================================================================*
 *				do_open					     *
 *===========================================================================*/
static int do_open(message *m_ptr, int ipc_status, int *code)
{
	/* Process a connection request. */
	struct VMMDevHGCMConnect *connreq;
	int i, r, conn, count;

	if (m_ptr->VBOX_COUNT < 0 || m_ptr->VBOX_COUNT > VMMDEV_HGCM_NAME_SIZE)
		return EINVAL;

	/* Find a free connection slot. Make sure the sending endpoint is not
	 * already using up half of the connection slots.
	 */
	conn = -1;
	count = 0;
	for (i = 0; i < MAX_CONNS; i++) {
		if (conn < 0 && hgcm_conn[i].state == STATE_FREE)
			conn = i;
		if (hgcm_conn[i].endpt == m_ptr->m_source)
			count++;
	}

	if (count >= MAX(MAX_CONNS / 2, 2))
		return EMFILE;

	if (conn < 0)
		return ENFILE;

	/* Initialize the connection and request structures. */
	hgcm_conn[conn].state = STATE_OPENING;
	hgcm_conn[conn].endpt = m_ptr->m_source;

	for (i = 0; i < MAX_REQS; i++) {
		hgcm_conn[conn].req[i].busy = FALSE;
		hgcm_conn[conn].req[i].ptr = NULL;
	}

	/* Set up and start the connection request. */
	r = alloc_req(conn);

	if (r < 0)
		return r;
	assert(r == 0);
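	/* All request slots were just marked not busy, so alloc_req() must
	 * have returned slot 0; the code below relies on that.
	 */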
	connreq = (struct VMMDevHGCMConnect *) hgcm_conn[conn].req[0].ptr;
	connreq->type = VMMDEV_HGCM_SVCLOC_LOCALHOST_EXISTING;
	if ((r = sys_safecopyfrom(m_ptr->m_source, m_ptr->VBOX_GRANT, 0,
			(vir_bytes) connreq->name, m_ptr->VBOX_COUNT)) !=
			OK) {
		free_conn(conn);

		return r;
	}
	connreq->name[VMMDEV_HGCM_NAME_SIZE-1] = 0;

	r = start_req(conn, 0, VMMDEV_REQ_HGCMCONNECT, sizeof(*connreq),
		ipc_status, m_ptr->VBOX_ID, code);

	if (r != OK && r != EDONTREPLY) {
		free_conn(conn);

		return r;
	}

	return (r == OK) ? finish_req(conn, 0, code) : r;
}
/*===========================================================================*
 *				do_close				     *
 *===========================================================================*/
static int do_close(message *m_ptr, int ipc_status, int *code)
{
	/* Process a disconnection request. */
	struct VMMDevHGCMDisconnect *discreq;
	int r, conn, req;

	conn = m_ptr->VBOX_CONN;

	/* Sanity checks. */
	if (conn < 0 || conn >= MAX_CONNS)
		return EINVAL;
	if (hgcm_conn[conn].endpt != m_ptr->m_source ||
			hgcm_conn[conn].state != STATE_OPEN)
		return EINVAL;

	/* Cancel any ongoing requests. */
	for (req = 0; req < MAX_REQS; req++)
		if (hgcm_conn[conn].req[req].busy)
			cancel_req(conn, req);
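	/* cancel_req() clears the busy flag, so no completion reply will be
	 * sent for these requests; the closing endpoint, as their only
	 * possible owner, has implicitly abandoned them.
	 */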
	assert(hgcm_conn[conn].req[0].ptr != NULL);

	discreq = (struct VMMDevHGCMDisconnect *) hgcm_conn[conn].req[0].ptr;
	discreq->client_id = hgcm_conn[conn].client_id;

	r = start_req(conn, 0, VMMDEV_REQ_HGCMDISCONNECT, sizeof(*discreq),
		ipc_status, m_ptr->VBOX_ID, code);

	if (r != OK && r != EDONTREPLY) {
		/* Neither we nor the caller can do anything with failures. */
		printf("VBOX: disconnection failure #1 (%d)\n", r);

		free_conn(conn);

		return OK;
	}

	hgcm_conn[conn].state = STATE_CLOSING;

	return (r == OK) ? finish_req(conn, 0, code) : r;
}
/*===========================================================================*
 *				store_pages				     *
 *===========================================================================*/
static int store_pages(int conn, int req, vbox_param_t *inp, size_t *offp)
{
	/* Create a page list of physical pages that make up the provided
	 * buffer area.
	 */
	struct vumap_vir vvec;
	struct vumap_phys pvec[MAPVEC_NR];
	struct VMMDevHGCMPageList *pagelist;
	size_t offset, size, skip;
	int i, j, r, first, access, count, pages;

	/* Empty strings are allowed. */
	if (inp->ptr.size == 0)
		return OK;

	pagelist = (struct VMMDevHGCMPageList *)
		(((u8_t *) hgcm_conn[conn].req[req].ptr) + *offp);

	pagelist->flags = 0;
	if (inp->ptr.dir & VBOX_DIR_IN)
		pagelist->flags |= VMMDEV_HGCM_FLAG_FROM_HOST;
	if (inp->ptr.dir & VBOX_DIR_OUT)
		pagelist->flags |= VMMDEV_HGCM_FLAG_TO_HOST;
	pagelist->count = 0;

	/* Make sure there is room for the header (but no actual pages yet). */
	*offp += sizeof(*pagelist) - sizeof(pagelist->addr[0]);
	if (*offp > VMMDEV_BUF_SIZE)
		return ENOMEM;

	access = 0;
	if (inp->ptr.dir & VBOX_DIR_IN) access |= VUA_WRITE;
	if (inp->ptr.dir & VBOX_DIR_OUT) access |= VUA_READ;

	offset = 0;
	first = TRUE;
	do {
		/* If the caller gives us a huge buffer, we might need multiple
		 * calls to sys_vumap(). Note that the caller currently has no
		 * reliable way to know whether such a buffer will fit in our
		 * request page. In the future, we may dynamically reallocate
		 * the request area to make more room as necessary; for now we
		 * just return an ENOMEM error in such cases.
		 */
		vvec.vv_grant = inp->ptr.grant;
		vvec.vv_size = inp->ptr.off + inp->ptr.size;
		count = MAPVEC_NR;
		if ((r = sys_vumap(hgcm_conn[conn].endpt, &vvec, 1,
				inp->ptr.off + offset, access, pvec,
				&count)) != OK)
			return r;

		/* First get the number of bytes processed, before (possibly)
		 * adjusting the size of the first element.
		 */
		for (i = size = 0; i < count; i++)
			size += pvec[i].vp_size;

		/* VirtualBox wants aligned page addresses only, and an offset
		 * into the first page. All pages except the last are full
		 * pages, and the last page is cut off using the size.
		 */
		skip = 0;
		if (first) {
			skip = pvec[0].vp_addr & (PAGE_SIZE - 1);
			pvec[0].vp_addr -= skip;
			pvec[0].vp_size += skip;
			pagelist->offset = skip;
			first = FALSE;
		}
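		/* Example: with 4K pages, a 6000-byte buffer that starts 16
		 * bytes into a page gives skip = 16 and, below,
		 * pages = (16 + 6000 + 4095) / 4096 = 2.
		 */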
		/* How many pages were mapped? */
		pages = (skip + size + PAGE_SIZE - 1) / PAGE_SIZE;

		/* Make sure there is room to store this many extra pages. */
		*offp += sizeof(pagelist->addr[0]) * pages;
		if (*offp > VMMDEV_BUF_SIZE)
			return ENOMEM;

		/* Actually store the pages in the page list. */
		for (i = j = 0; i < pages; i++) {
			assert(!(pvec[j].vp_addr & (PAGE_SIZE - 1)));

			pagelist->addr[pagelist->count++] =
				cvul64(pvec[j].vp_addr);

			if (pvec[j].vp_size > PAGE_SIZE) {
				pvec[j].vp_addr += PAGE_SIZE;
				pvec[j].vp_size -= PAGE_SIZE;
			}
			else j++;
		}

		assert(j == count);

		offset += size;
	} while (offset < inp->ptr.size);

	assert(offset == inp->ptr.size);

	return OK;
}
/*===========================================================================*
 *				do_call					     *
 *===========================================================================*/
static int do_call(message *m_ptr, int ipc_status, int *code)
{
	/* Perform a HGCM call. */
	vbox_param_t *inp;
	struct VMMDevHGCMParam *outp;
	struct VMMDevHGCMCall *callreq;
	size_t size;
	int i, r, conn, req, count;

	conn = m_ptr->VBOX_CONN;
	count = m_ptr->VBOX_COUNT;

	/* Sanity checks. */
	if (conn < 0 || conn >= MAX_CONNS)
		return EINVAL;
	if (hgcm_conn[conn].endpt != m_ptr->m_source ||
			hgcm_conn[conn].state != STATE_OPEN)
		return EINVAL;

	/* Allocate a request, and copy in the parameters. */
	req = alloc_req(conn);

	if (req < 0)
		return req;

	hgcm_conn[conn].req[req].grant = m_ptr->VBOX_GRANT;
	hgcm_conn[conn].req[req].count = count;

	if (count > 0) {
		if ((r = sys_safecopyfrom(m_ptr->m_source, m_ptr->VBOX_GRANT,
				0, (vir_bytes) hgcm_conn[conn].req[req].param,
				count * sizeof(vbox_param_t))) != OK)
			return r;
	}

	/* Set up the basic request. */
	callreq = (struct VMMDevHGCMCall *) hgcm_conn[conn].req[req].ptr;
	callreq->client_id = hgcm_conn[conn].client_id;
	callreq->function = m_ptr->VBOX_FUNCTION;
	callreq->count = count;

	/* Rewrite and convert the parameters. */
	inp = &hgcm_conn[conn].req[req].param[0];
	outp = (struct VMMDevHGCMParam *) &callreq[1];

	size = sizeof(*callreq) + sizeof(*outp) * count;
	assert(size < VMMDEV_BUF_SIZE);
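	/* The request buffer is laid out as the call header, then the
	 * parameter array, then any page lists; "size" tracks the end of the
	 * buffer used so far, so each pagelist.offset below points past the
	 * parameters to that parameter's own page list.
	 */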
	for (i = 0; i < count; i++) {
		switch (inp->type) {
		case VBOX_TYPE_U32:
			outp->type = VMMDEV_HGCM_PARAM_U32;
			outp->u32 = inp->u32;
			break;

		case VBOX_TYPE_U64:
			outp->type = VMMDEV_HGCM_PARAM_U64;
			outp->u64 = inp->u64;
			break;

		case VBOX_TYPE_PTR:
			outp->type = VMMDEV_HGCM_PARAM_PAGELIST;
			outp->pagelist.offset = size;
			outp->pagelist.size = inp->ptr.size;

			if ((r = store_pages(conn, req, inp, &size)) != OK)
				return r;

			break;

		default:
			return EINVAL;
		}

		inp++;
		outp++;
	}

	/* Start the request. */
	r = start_req(conn, req, VMMDEV_REQ_HGCMCALL, size, ipc_status,
		m_ptr->VBOX_ID, code);

	if (r != OK && r != EDONTREPLY)
		return r;

	return (r == OK) ? finish_req(conn, req, code) : r;
}
/*===========================================================================*
 *				do_cancel				     *
 *===========================================================================*/
static int do_cancel(message *m_ptr, int ipc_status)
{
	/* Cancel an ongoing call. */
	int conn, req;

	conn = m_ptr->VBOX_CONN;

	/* Sanity checks. Note that connection and disconnection requests
	 * cannot be cancelled.
	 */
	if (conn < 0 || conn >= MAX_CONNS)
		return EINVAL;
	if (hgcm_conn[conn].endpt != m_ptr->m_source ||
			hgcm_conn[conn].state != STATE_OPEN)
		return EINVAL;

	/* Find the request. */
	for (req = 0; req < MAX_REQS; req++) {
		if (hgcm_conn[conn].req[req].busy &&
				hgcm_conn[conn].req[req].id == m_ptr->VBOX_ID)
			break;
	}

	/* If no such request was ongoing, then our behavior depends on the
	 * way the request was made: we do not want to send two asynchronous
	 * replies for one request, but if the caller used SENDREC, we have to
	 * reply with something or the caller would deadlock.
	 */
	if (req == MAX_REQS) {
		if (IPC_STATUS_CALL(ipc_status) == SENDREC)
			return EINVAL;
		else
			return EDONTREPLY;
	}

	/* Actually cancel the request, and send a reply. */
	cancel_req(conn, req);
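	/* The EINTR reply below carries the caller-supplied request ID
	 * (VBOX_ID), which identifies the cancelled call, so the caller can
	 * match the cancellation result to its original call.
	 */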
	return EINTR;
}
/*===========================================================================*
 *				hgcm_message				     *
 *===========================================================================*/
void hgcm_message(message *m_ptr, int ipc_status)
{
	/* Process a request message. */
	int r, code = VMMDEV_ERR_GENERIC;

	switch (m_ptr->m_type) {
	case VBOX_OPEN:		r = do_open(m_ptr, ipc_status, &code);	break;
	case VBOX_CLOSE:	r = do_close(m_ptr, ipc_status, &code);	break;
	case VBOX_CALL:		r = do_call(m_ptr, ipc_status, &code);	break;
	case VBOX_CANCEL:	r = do_cancel(m_ptr, ipc_status);	break;
	default:		r = ENOSYS;				break;
	}

	if (r != EDONTREPLY)
		send_reply(m_ptr->m_source, ipc_status, r, code,
			m_ptr->VBOX_ID);
}
/*===========================================================================*
 *				hgcm_intr				     *
 *===========================================================================*/
void hgcm_intr(void)
{
	/* We received an HGCM event. Check ongoing requests for completion. */
	int conn;

	for (conn = 0; conn < MAX_CONNS; conn++)
		if (hgcm_conn[conn].state != STATE_FREE)
			check_conn(conn);
}