etc/services - sync with NetBSD-8
[minix.git] / minix / net / lwip / ndev.c
blobd806b5af7eb01ae4eb21e70a141102e17996a32b
1 /* LWIP service - ndev.c - network driver communication module */
2 /*
3 * There is almost a one-to-one mapping between network device driver (ndev)
4 * objects and ethernet interface (ethif) objects, with as major difference
5 * that there may be an ndev object but not an ethif object for a driver that
6 * is known to exist but has not yet replied to our initialization request:
7 * without the information from the initialization request, there is no point
8 * creating an ethif object just yet, while we do need to track the driver
9 * process. TODO: it would be nice if unanswered init requests timed out and
10 * caused the removal of the ndev object after a while.
12 * Beyond that, this module aims to abstract away the low-level details of
13 * communication, memory grants, and driver restarts. Driver restarts are not
14 * fully transparent to the ethif module because it needs to reinitialize
15 * driver state only it knows about after a restart. Drivers that are in the
16 * process of restarting and therefore not operational are said to be disabled.
18 * From this module's point of view, a network driver is in one of two states:
19 * initializing, where it has yet to respond to our initialization request, and
20 * active, where it is expected to accept and respond to all other requests.
21 * This module does not keep track of higher-level states and rules however;
22 * that is left to the ethif layer on one side, and the network driver itself
23 * on the other side. One important example is the interface being up or down:
24 * the ndev layer will happily forward send and receive requests when the
25 * interface is down, but these requests will be (resp.) dropped and rejected
26 * by the network driver in that state, and will not be generated by the ethif
27 * layer when the layer is down. Imposing barriers between configure and send
28 * requests is also left to the other parties.
30 * In this module, each active network driver has a send queue and a receive
31 * queue. The send queue is shared for packet send requests and configuration
32 * change requests. The receive queue is used for packet receive requests
33 * only. Each queue has a maximum depth, which is the minimum of a value
34 * provided by the network driver during initialization and local restrictions.
35 * These local restrictions are different for the two queue types: the receive
36 * queue is always bounded to a hardcoded value, while the send queue has a
37 * guaranteed minimum depth but may use up to the driver's maximum using spare
38 * entries. For both, a minimum depth is always available, since it is not
39 * possible to cancel individual send or receive requests after they have been
40 * sent to a particular driver. This does mean that we necessarily waste a
41 * large number of request structures in the common case.
43 * The general API model does not support the notion of blocking calls. While
44 * it would make sense to retrieve e.g. error statistics from the driver only
45 * when requested by userland, implementing this without threads would be
46 * seriously complicated, because such requests can have many origins (ioctl,
47 * PF_ROUTE message, sysctl). Instead, we rely on drivers updating us with the
48 * latest information on everything at all times, so that we can hand over a
49 * cached copy of (e.g.) those error statistics right away. We provide a means
50 * for drivers to perform rate limiting of such status updates (to prevent
51 * overflowing asynsend queues), by replying to these status messages. That
52 * means that there is a request-response combo going in the opposite direction
53 * of the regular messages.
55 * TODO: in the future we will want to obtain the list of supported media modes
56 * (IFM_) from drivers, so that userland can view the list. Given the above
57 * model, the easiest way would be to obtain a copy of the full list, limited
58 * to a configured number of entries, at driver initialization time. This
59 * would require that the initialization request also involve a memory grant.
61 * If necessary, it would not be too much work to split off this module into
62 * its own libndev library. For now, there is no point in doing this and the
63 * tighter coupling allows us to optimize just a little bit (see pbuf usage).
66 #include "lwip.h"
67 #include "ndev.h"
68 #include "ethif.h"
#define LABEL_MAX	16	/* FIXME: this should be in a system header */

#define NDEV_SENDQ	2	/* minimum guaranteed send queue depth */
#define NDEV_RECVQ	2	/* guaranteed receive queue depth */
#define NREQ_SPARES	8	/* spare send queue (request) objects */
#define NR_NREQ		((NDEV_SENDQ + NDEV_RECVQ) * NR_NDEV + NREQ_SPARES)

/* Free list of request objects, shared between all drivers and queues. */
static SIMPLEQ_HEAD(, ndev_req) nreq_freelist;

/*
 * One pending request to a network driver.  The grant array is terminated by
 * an invalid grant unless all NDEV_IOV_MAX entries are in use.
 */
static struct ndev_req {
	SIMPLEQ_ENTRY(ndev_req) nreq_next;	/* next request in queue */
	int nreq_type;				/* type of request message */
	cp_grant_id_t nreq_grant[NDEV_IOV_MAX];	/* grants for request */
} nreq_array[NR_NREQ];

static unsigned int nreq_spares;	/* number of free spare objects */

/*
 * A queue of pending requests to one driver.  nq_head is the sequence ID of
 * the oldest pending request; replies are matched against it in order.
 */
struct ndev_queue {
	uint32_t nq_head;	/* ID of oldest pending request */
	uint8_t nq_count;	/* current nr of pending requests */
	uint8_t nq_max;		/* maximum nr of pending requests */
	SIMPLEQ_HEAD(, ndev_req) nq_req;	/* queue of pending requests */
};

/* Per-driver state.  A slot is free iff ndev_endpt == NONE. */
static struct ndev {
	endpoint_t ndev_endpt;		/* driver endpoint */
	char ndev_label[LABEL_MAX];	/* driver label */
	struct ethif *ndev_ethif;	/* ethif object, or NULL if init'ing */
	struct ndev_queue ndev_sendq;	/* packet send and configure queue */
	struct ndev_queue ndev_recvq;	/* packet receive queue */
} ndev_array[NR_NDEV];

static ndev_id_t ndev_max;		/* highest driver count ever seen */

/*
 * This macro checks whether the network driver is active rather than
 * initializing.  See above for more information.  (nq_max becomes nonzero
 * only once the driver's init reply has been accepted.)
 */
#define NDEV_ACTIVE(ndev)	((ndev)->ndev_sendq.nq_max > 0)

static int ndev_pending;		/* number of initializing drivers */

/* The CTL_MINIX MINIX_LWIP "drivers" subtree.  Dynamically numbered. */
static struct rmib_node minix_lwip_drivers_table[] = {
	RMIB_INTPTR(RMIB_RO, &ndev_pending, "pending",
	    "Number of drivers currently initializing"),
};

static struct rmib_node minix_lwip_drivers_node =
    RMIB_NODE(RMIB_RO, minix_lwip_drivers_table, "drivers",
	"Network driver information");
/*
 * Initialize the network driver communication module.  Called once at
 * service startup, before any driver messages are processed.
 */
void
ndev_init(void)
{
	unsigned int slot;
	int r;

	/* Initialize local variables. */
	ndev_max = 0;

	/* Put all request objects on the free list. */
	SIMPLEQ_INIT(&nreq_freelist);

	for (slot = 0; slot < __arraycount(nreq_array); slot++)
		SIMPLEQ_INSERT_TAIL(&nreq_freelist, &nreq_array[slot],
		    nreq_next);

	nreq_spares = NREQ_SPARES;

	/*
	 * Preallocate the total number of grants that we could possibly need
	 * concurrently.  Even though it is extremely unlikely that we will
	 * ever need that many grants in practice, the alternative is runtime
	 * dynamic memory (re)allocation which is something we prefer to avoid
	 * altogether.  At time of writing, we end up preallocating 320 grants
	 * using up a total of a bit under 9KB of memory.
	 */
	cpf_prealloc(NR_NREQ * NDEV_IOV_MAX);

	/*
	 * Not needed, just for ultimate safety: start off all queues with
	 * wildly different request sequence numbers, to minimize the chance
	 * that any two replies will ever be confused.
	 */
	for (slot = 0; slot < __arraycount(ndev_array); slot++) {
		ndev_array[slot].ndev_sendq.nq_head = slot << 21;
		ndev_array[slot].ndev_recvq.nq_head = (slot * 2 + 1) << 20;
	}

	/* Subscribe to Data Store (DS) events from network drivers. */
	if ((r = ds_subscribe("drv\\.net\\..*",
	    DSF_INITIAL | DSF_OVERWRITE)) != OK)
		panic("unable to subscribe to driver events: %d", r);

	/*
	 * Keep track of how many drivers are in "pending" state, which means
	 * that they have not yet replied to our initialization request.
	 */
	ndev_pending = 0;

	/* Register the minix.lwip.drivers subtree. */
	mibtree_register_lwip(&minix_lwip_drivers_node);
}
179 * Initialize a queue for first use.
181 static void
182 ndev_queue_init(struct ndev_queue * nq)
186 * Only ever increase sequence numbers, to minimize the chance that
187 * two (e.g. from different driver instances) happen to be the same.
189 nq->nq_head++;
191 nq->nq_count = 0;
192 nq->nq_max = 0;
193 SIMPLEQ_INIT(&nq->nq_req);
/*
 * Advance the given request queue, freeing up the request at the head of the
 * queue including any grants in use for it.  The queue must not be empty.
 */
static void
ndev_queue_advance(struct ndev_queue * nq)
{
	struct ndev_req * nreq;
	cp_grant_id_t grant;
	unsigned int i;

	nreq = SIMPLEQ_FIRST(&nq->nq_req);

	/*
	 * Revoke all grants of the request.  An invalid grant marks the end
	 * of the used part of the (possibly full) grant array.
	 */
	for (i = 0; i < __arraycount(nreq->nreq_grant); i++) {
		grant = nreq->nreq_grant[i];

		if (!GRANT_VALID(grant))
			break;

		/* TODO: make the safecopies code stop using errno. */
		if (cpf_revoke(grant) != 0)
			panic("unable to revoke grant: %d", -errno);
	}

	/*
	 * If this was a send-queue request beyond the guaranteed minimum
	 * depth, it was backed by a spare object; return the spare to the
	 * shared pool.  Note: nq_count has not been decremented yet here.
	 */
	if (nreq->nreq_type != NDEV_RECV && nq->nq_count > NDEV_SENDQ) {
		nreq_spares++;

		assert(nreq_spares <= NREQ_SPARES);
	}

	/* Move the request object back onto the free list. */
	SIMPLEQ_REMOVE_HEAD(&nq->nq_req, nreq_next);

	SIMPLEQ_INSERT_HEAD(&nreq_freelist, nreq, nreq_next);

	/* The next-oldest request (if any) now heads the queue. */
	nq->nq_head++;
	nq->nq_count--;
}
235 * Clear any outstanding requests from the given queue and reset it to a
236 * pre-initialization state.
238 static void
239 ndev_queue_reset(struct ndev_queue * nq)
242 while (nq->nq_count > 0) {
243 assert(!SIMPLEQ_EMPTY(&nq->nq_req));
245 ndev_queue_advance(nq);
248 nq->nq_max = 0;
252 * Obtain a request object for use in a new request. Return the request
253 * object, with its request type field set to 'type', and with the request
254 * sequence ID returned in 'seq'. Return NULL if no request objects are
255 * available for the given request type. If the caller does send off the
256 * request, a call to ndev_queue_add() must follow immediately after. If the
257 * caller fails to send off the request for other reasons, it need not do
258 * anything: this function does not perform any actions that need to be undone.
260 static struct ndev_req *
261 ndev_queue_get(struct ndev_queue * nq, int type, uint32_t * seq)
263 struct ndev_req *nreq;
265 /* Has the hard queue depth limit been reached? */
266 if (nq->nq_count == nq->nq_max)
267 return NULL;
270 * For send requests, we may use request objects from a shared "spares"
271 * pool, if available.
273 if (type != NDEV_RECV && nq->nq_count >= NDEV_SENDQ &&
274 nreq_spares == 0)
275 return NULL;
277 assert(!SIMPLEQ_EMPTY(&nreq_freelist));
278 nreq = SIMPLEQ_FIRST(&nreq_freelist);
280 nreq->nreq_type = type;
282 *seq = nq->nq_head + nq->nq_count;
284 return nreq;
288 * Add a successfully sent request to the given queue. The request must have
289 * been obtained using ndev_queue_get() directly before the call to this
290 * function. This function never fails.
292 static void
293 ndev_queue_add(struct ndev_queue * nq, struct ndev_req * nreq)
296 if (nreq->nreq_type != NDEV_RECV && nq->nq_count >= NDEV_SENDQ) {
297 assert(nreq_spares > 0);
299 nreq_spares--;
302 SIMPLEQ_REMOVE_HEAD(&nreq_freelist, nreq_next);
304 SIMPLEQ_INSERT_TAIL(&nq->nq_req, nreq, nreq_next);
306 nq->nq_count++;
310 * Remove the head of the given request queue, but only if it matches the given
311 * request type and sequence ID. Return TRUE if the head was indeed removed,
312 * or FALSE if the head of the request queue (if any) did not match the given
313 * type and/or sequence ID.
315 static int
316 ndev_queue_remove(struct ndev_queue * nq, int type, uint32_t seq)
318 struct ndev_req *nreq;
320 if (nq->nq_count < 1 || nq->nq_head != seq)
321 return FALSE;
323 assert(!SIMPLEQ_EMPTY(&nq->nq_req));
324 nreq = SIMPLEQ_FIRST(&nq->nq_req);
326 if (nreq->nreq_type != type)
327 return FALSE;
329 ndev_queue_advance(nq);
331 return TRUE;
335 * Send an initialization request to a driver. If this is a new driver, the
336 * ethif module does not get to know about the driver until it answers to this
337 * request, as the ethif module needs much of what the reply contains. On the
338 * other hand, if this is a restarted driver, it will stay disabled until the
339 * init reply comes in.
341 static void
342 ndev_send_init(struct ndev * ndev)
344 message m;
345 int r;
347 memset(&m, 0, sizeof(m));
348 m.m_type = NDEV_INIT;
349 m.m_ndev_netdriver_init.id = ndev->ndev_sendq.nq_head;
351 if ((r = asynsend3(ndev->ndev_endpt, &m, AMF_NOREPLY)) != OK)
352 panic("asynsend to driver failed: %d", r);
/*
 * A network device driver has been started or restarted.  'label' is the
 * driver's DS label (NUL-terminated, checked by the caller) and 'endpt' is
 * its current endpoint.
 */
static void
ndev_up(const char * label, endpoint_t endpt)
{
	static int reported = FALSE;	/* "out of slots" printed once only */
	struct ndev *ndev;
	ndev_id_t slot;

	/*
	 * First see if we already had an entry for this driver.  If so, it
	 * has been restarted, and we need to report it as not running to
	 * ethif.
	 */
	ndev = NULL;

	for (slot = 0; slot < ndev_max; slot++) {
		if (ndev_array[slot].ndev_endpt == NONE) {
			/* Remember the first free slot, for reuse below. */
			if (ndev == NULL)
				ndev = &ndev_array[slot];

			continue;
		}

		if (!strcmp(ndev_array[slot].ndev_label, label)) {
			/* Cancel any ongoing requests. */
			ndev_queue_reset(&ndev_array[slot].ndev_sendq);
			ndev_queue_reset(&ndev_array[slot].ndev_recvq);

			/*
			 * If the driver already had an ethif object, disable
			 * it until the reinit reply comes in; the driver is
			 * now "pending" again.
			 */
			if (ndev_array[slot].ndev_ethif != NULL) {
				ethif_disable(ndev_array[slot].ndev_ethif);

				ndev_pending++;
			}

			ndev_array[slot].ndev_endpt = endpt;

			/* Attempt to resume communication. */
			ndev_send_init(&ndev_array[slot]);

			return;
		}
	}

	if (ndev == NULL) {
		/*
		 * If there is no free slot for this driver in our table, we
		 * necessarily have to ignore the driver altogether.  We
		 * report such cases once, so that the user can recompile if
		 * desired.
		 */
		if (ndev_max == __arraycount(ndev_array)) {
			if (!reported) {
				printf("LWIP: not enough ndev slots!\n");

				reported = TRUE;
			}

			return;
		}

		ndev = &ndev_array[ndev_max++];
	}

	/* Initialize the slot. */
	ndev->ndev_endpt = endpt;
	strlcpy(ndev->ndev_label, label, sizeof(ndev->ndev_label));
	ndev->ndev_ethif = NULL;
	ndev_queue_init(&ndev->ndev_sendq);
	ndev_queue_init(&ndev->ndev_recvq);

	ndev_send_init(ndev);

	ndev_pending++;
}
430 * A network device driver has been terminated.
432 static void
433 ndev_down(struct ndev * ndev)
436 /* Cancel any ongoing requests. */
437 ndev_queue_reset(&ndev->ndev_sendq);
438 ndev_queue_reset(&ndev->ndev_recvq);
441 * If this ndev object had a corresponding ethif object, tell the ethif
442 * layer that the device is really gone now.
444 if (ndev->ndev_ethif != NULL)
445 ethif_remove(ndev->ndev_ethif);
446 else
447 ndev_pending--;
449 /* Remove the driver from our own administration. */
450 ndev->ndev_endpt = NONE;
452 while (ndev_max > 0 && ndev_array[ndev_max - 1].ndev_endpt == NONE)
453 ndev_max--;
/*
 * The DS service has notified us of changes to our subscriptions.  That
 * means that network drivers may have been started, restarted, and/or shut
 * down.  Find out what has changed, and act accordingly.
 */
void
ndev_check(void)
{
	static const char *prefix = "drv.net.";
	char key[DS_MAX_KEYLEN], *label;
	size_t prefixlen;
	endpoint_t endpt;
	uint32_t val;
	ndev_id_t slot;
	int r;

	prefixlen = strlen(prefix);

	/* Check whether any drivers have been (re)started. */
	while ((r = ds_check(key, NULL, &endpt)) == OK) {
		if (strncmp(key, prefix, prefixlen) != 0 || endpt == NONE)
			continue;

		if (ds_retrieve_u32(key, &val) != OK || val != DS_DRIVER_UP)
			continue;

		/* The label must be nonempty and NUL-terminated in bounds. */
		label = &key[prefixlen];
		if (label[0] == '\0' || memchr(label, '\0', LABEL_MAX) == NULL)
			continue;

		ndev_up(label, endpt);
	}

	if (r != ENOENT)
		printf("LWIP: DS check failed (%d)\n", r);

	/*
	 * Check whether the drivers we currently know about are still up.
	 * The ones that are not are really gone.  It is no problem that we
	 * recheck any drivers that have just been reported by ds_check()
	 * above.  However, we cannot check the same key: while the driver is
	 * being restarted, its driver status is already gone from DS.
	 * Instead, see if there is still an entry for its label, as that
	 * entry remains in existence during the restart.  The associated
	 * endpoint may still change however, so do not check that part: in
	 * such cases we will get a driver-up announcement later anyway.
	 */
	for (slot = 0; slot < ndev_max; slot++) {
		if (ndev_array[slot].ndev_endpt == NONE)
			continue;

		if (ds_retrieve_label_endpt(ndev_array[slot].ndev_label,
		    &endpt) != OK)
			ndev_down(&ndev_array[slot]);
	}
}
/*
 * A network device driver has sent a reply to our initialization request.
 * Validate the reply, and create or (re)enable the corresponding ethif
 * object.  On any validation or enabling failure, forget about the driver.
 */
static void
ndev_init_reply(struct ndev * ndev, const message * m_ptr)
{
	struct ndev_hwaddr hwaddr;
	uint8_t hwaddr_len, max_send, max_recv;
	const char *name;
	int enabled;

	/*
	 * Make sure that we were waiting for a reply to an initialization
	 * request, and that this is the reply to that request.
	 */
	if (NDEV_ACTIVE(ndev) ||
	    m_ptr->m_netdriver_ndev_init_reply.id != ndev->ndev_sendq.nq_head)
		return;

	/*
	 * Do just enough sanity checking on the data to pass it up to the
	 * ethif layer, which will check the rest (e.g., name duplicates).
	 */
	if (memchr(m_ptr->m_netdriver_ndev_init_reply.name, '\0',
	    sizeof(m_ptr->m_netdriver_ndev_init_reply.name)) == NULL ||
	    m_ptr->m_netdriver_ndev_init_reply.name[0] == '\0') {
		printf("LWIP: driver %d provided invalid name\n",
		    m_ptr->m_source);

		ndev_down(ndev);

		return;
	}

	hwaddr_len = m_ptr->m_netdriver_ndev_init_reply.hwaddr_len;
	if (hwaddr_len < 1 || hwaddr_len > __arraycount(hwaddr.nhwa_addr)) {
		printf("LWIP: driver %d provided invalid HW-addr length\n",
		    m_ptr->m_source);

		ndev_down(ndev);

		return;
	}

	if ((max_send = m_ptr->m_netdriver_ndev_init_reply.max_send) < 1 ||
	    (max_recv = m_ptr->m_netdriver_ndev_init_reply.max_recv) < 1) {
		printf("LWIP: driver %d provided invalid queue maximum\n",
		    m_ptr->m_source);

		ndev_down(ndev);

		return;
	}

	/*
	 * If the driver is new, allocate a new ethif object for it.  On
	 * success, or if the driver was restarted, (re)enable the interface.
	 * Both calls may fail, in which case we should forget about the
	 * driver.  It may continue to send us messages, which we should then
	 * discard.
	 */
	name = m_ptr->m_netdriver_ndev_init_reply.name;

	if (ndev->ndev_ethif == NULL) {
		ndev->ndev_ethif = ethif_add((ndev_id_t)(ndev - ndev_array),
		    name, m_ptr->m_netdriver_ndev_init_reply.caps);
		name = NULL;
	}

	if (ndev->ndev_ethif != NULL) {
		/*
		 * Set the maximum numbers of pending requests (for each
		 * direction) first, because enabling the interface may cause
		 * the ethif layer to start sending requests immediately.
		 */
		ndev->ndev_sendq.nq_max = max_send;
		ndev->ndev_sendq.nq_head++;

		/*
		 * Limit the maximum number of concurrently pending receive
		 * requests to our configured maximum.  For send requests, we
		 * use a more dynamic approach with spare request objects.
		 */
		if (max_recv > NDEV_RECVQ)
			max_recv = NDEV_RECVQ;
		ndev->ndev_recvq.nq_max = max_recv;
		ndev->ndev_recvq.nq_head++;

		memset(&hwaddr, 0, sizeof(hwaddr));
		memcpy(hwaddr.nhwa_addr,
		    m_ptr->m_netdriver_ndev_init_reply.hwaddr, hwaddr_len);

		/*
		 * Provide a NULL pointer for the name if we have only just
		 * added the interface at all.  The callee may use this to
		 * determine whether the driver is new or has been restarted.
		 */
		enabled = ethif_enable(ndev->ndev_ethif, name, &hwaddr,
		    m_ptr->m_netdriver_ndev_init_reply.hwaddr_len,
		    m_ptr->m_netdriver_ndev_init_reply.caps,
		    m_ptr->m_netdriver_ndev_init_reply.link,
		    m_ptr->m_netdriver_ndev_init_reply.media);
	} else
		enabled = FALSE;

	/*
	 * If we did not manage to enable the interface, remove it again,
	 * possibly also from the ethif layer.  Otherwise, the driver has
	 * left the "pending" (initializing) state.
	 */
	if (!enabled)
		ndev_down(ndev);
	else
		ndev_pending--;
}
/*
 * Request that a network device driver change its configuration.  This
 * function allows for configuration of various different driver and device
 * aspects: the I/O mode (and multicast receipt list), the enabled (sub)set
 * of capabilities, the driver-specific flags, and the hardware address.
 * Each of these settings may be changed by setting the corresponding
 * NDEV_SET_ flag in the 'set' field of the given configuration structure.
 * It is explicitly allowed to generate a request with no NDEV_SET_ flags;
 * such a request will be sent to the driver and ultimately generate a
 * response.  Return OK if the configuration request was sent to the driver,
 * EBUSY if no (more) requests can be sent to the driver right now, or ENOMEM
 * on grant allocation failure.
 */
int
ndev_conf(ndev_id_t id, const struct ndev_conf * nconf)
{
	struct ndev *ndev;
	struct ndev_req *nreq;
	uint32_t seq;
	message m;
	cp_grant_id_t grant;
	int r;

	assert(id < __arraycount(ndev_array));
	ndev = &ndev_array[id];

	assert(ndev->ndev_endpt != NONE);
	assert(NDEV_ACTIVE(ndev));

	/* Reserve a request object; no commitment is made until queue_add. */
	if ((nreq = ndev_queue_get(&ndev->ndev_sendq, NDEV_CONF,
	    &seq)) == NULL)
		return EBUSY;

	memset(&m, 0, sizeof(m));
	m.m_type = NDEV_CONF;
	m.m_ndev_netdriver_conf.id = seq;
	m.m_ndev_netdriver_conf.set = nconf->nconf_set;

	grant = GRANT_INVALID;

	if (nconf->nconf_set & NDEV_SET_MODE) {
		m.m_ndev_netdriver_conf.mode = nconf->nconf_mode;

		/* The multicast list, if any, is passed by grant. */
		if (nconf->nconf_mode & NDEV_MODE_MCAST_LIST) {
			assert(nconf->nconf_mclist != NULL);
			assert(nconf->nconf_mccount != 0);

			grant = cpf_grant_direct(ndev->ndev_endpt,
			    (vir_bytes)nconf->nconf_mclist,
			    sizeof(nconf->nconf_mclist[0]) *
			    nconf->nconf_mccount, CPF_READ);

			/* Safe: the request object has not been committed. */
			if (!GRANT_VALID(grant))
				return ENOMEM;

			m.m_ndev_netdriver_conf.mcast_count =
			    nconf->nconf_mccount;
		}
	}

	/* Always set; GRANT_INVALID when no multicast list is passed. */
	m.m_ndev_netdriver_conf.mcast_grant = grant;

	if (nconf->nconf_set & NDEV_SET_CAPS)
		m.m_ndev_netdriver_conf.caps = nconf->nconf_caps;

	if (nconf->nconf_set & NDEV_SET_FLAGS)
		m.m_ndev_netdriver_conf.flags = nconf->nconf_flags;

	if (nconf->nconf_set & NDEV_SET_MEDIA)
		m.m_ndev_netdriver_conf.media = nconf->nconf_media;

	if (nconf->nconf_set & NDEV_SET_HWADDR)
		memcpy(m.m_ndev_netdriver_conf.hwaddr,
		    nconf->nconf_hwaddr.nhwa_addr,
		    __arraycount(m.m_ndev_netdriver_conf.hwaddr));

	if ((r = asynsend3(ndev->ndev_endpt, &m, AMF_NOREPLY)) != OK)
		panic("asynsend to driver failed: %d", r);

	/* Commit the request, recording the grant for later revocation. */
	nreq->nreq_grant[0] = grant;	/* may also be invalid */
	nreq->nreq_grant[1] = GRANT_INVALID;

	ndev_queue_add(&ndev->ndev_sendq, nreq);

	return OK;
}
715 * The network device driver has sent a reply to a configuration request.
717 static void
718 ndev_conf_reply(struct ndev * ndev, const message * m_ptr)
722 * Was this the request we were waiting for? If so, remove it from the
723 * send queue. Otherwise, ignore this reply message.
725 if (!NDEV_ACTIVE(ndev) || !ndev_queue_remove(&ndev->ndev_sendq,
726 NDEV_CONF, m_ptr->m_netdriver_ndev_reply.id))
727 return;
729 /* Tell the ethif layer about the updated configuration. */
730 assert(ndev->ndev_ethif != NULL);
732 ethif_configured(ndev->ndev_ethif,
733 m_ptr->m_netdriver_ndev_reply.result);
/*
 * Construct a packet send or receive request and send it off to a network
 * driver.  The given pbuf chain may be part of a queue.  Return OK if the
 * request was successfully sent, or ENOMEM on grant allocation failure.
 */
static int
ndev_transfer(struct ndev * ndev, const struct pbuf * pbuf, int do_send,
	uint32_t seq, struct ndev_req * nreq)
{
	cp_grant_id_t grant;
	message m;
	unsigned int i;
	size_t left;
	int r;

	memset(&m, 0, sizeof(m));
	m.m_type = (do_send) ? NDEV_SEND : NDEV_RECV;
	m.m_ndev_netdriver_transfer.id = seq;

	/* Walk the pbuf chain, granting each buffer to the driver. */
	left = pbuf->tot_len;

	for (i = 0; left > 0; i++) {
		assert(i < NDEV_IOV_MAX);

		grant = cpf_grant_direct(ndev->ndev_endpt,
		    (vir_bytes)pbuf->payload, pbuf->len,
		    (do_send) ? CPF_READ : CPF_WRITE);

		/* On failure, roll back the grants made so far. */
		if (!GRANT_VALID(grant)) {
			while (i-- > 0)
				(void)cpf_revoke(nreq->nreq_grant[i]);

			return ENOMEM;
		}

		m.m_ndev_netdriver_transfer.grant[i] = grant;
		m.m_ndev_netdriver_transfer.len[i] = pbuf->len;

		/* Record the grant so it can be revoked on completion. */
		nreq->nreq_grant[i] = grant;

		assert(left >= pbuf->len);
		left -= pbuf->len;
		pbuf = pbuf->next;
	}

	m.m_ndev_netdriver_transfer.count = i;

	/*
	 * Unless the array is full, an invalid grant marks the end of the
	 * list of valid grants.
	 */
	if (i < __arraycount(nreq->nreq_grant))
		nreq->nreq_grant[i] = GRANT_INVALID;

	if ((r = asynsend3(ndev->ndev_endpt, &m, AMF_NOREPLY)) != OK)
		panic("asynsend to driver failed: %d", r);

	return OK;
}
797 * Send a packet to the given network driver. Return OK if the packet is sent
798 * off to the driver, EBUSY if no (more) packets can be sent to the driver at
799 * this time, or ENOMEM on grant allocation failure.
801 * The use of 'pbuf' in this interface is a bit ugly, but it saves us from
802 * having to go through an intermediate representation (e.g. an iovec array)
803 * for the data being sent. The same applies to ndev_receive().
806 ndev_send(ndev_id_t id, const struct pbuf * pbuf)
808 struct ndev *ndev;
809 struct ndev_req *nreq;
810 uint32_t seq;
811 int r;
813 assert(id < __arraycount(ndev_array));
814 ndev = &ndev_array[id];
816 assert(ndev->ndev_endpt != NONE);
817 assert(NDEV_ACTIVE(ndev));
819 if ((nreq = ndev_queue_get(&ndev->ndev_sendq, NDEV_SEND,
820 &seq)) == NULL)
821 return EBUSY;
823 if ((r = ndev_transfer(ndev, pbuf, TRUE /*do_send*/, seq, nreq)) != OK)
824 return r;
826 ndev_queue_add(&ndev->ndev_sendq, nreq);
828 return OK;
832 * The network device driver has sent a reply to a send request.
834 static void
835 ndev_send_reply(struct ndev * ndev, const message * m_ptr)
839 * Was this the request we were waiting for? If so, remove it from the
840 * send queue. Otherwise, ignore this reply message.
842 if (!NDEV_ACTIVE(ndev) || !ndev_queue_remove(&ndev->ndev_sendq,
843 NDEV_SEND, m_ptr->m_netdriver_ndev_reply.id))
844 return;
846 /* Tell the ethif layer about the result of the transmission. */
847 assert(ndev->ndev_ethif != NULL);
849 ethif_sent(ndev->ndev_ethif,
850 m_ptr->m_netdriver_ndev_reply.result);
854 * Return TRUE if a new receive request can be spawned for a particular network
855 * driver, or FALSE if its queue of receive requests is full. This call exists
856 * merely to avoid needless buffer allocatin in the case that ndev_recv() is
857 * going to return EBUSY anyway.
860 ndev_can_recv(ndev_id_t id)
862 struct ndev *ndev;
864 assert(id < __arraycount(ndev_array));
865 ndev = &ndev_array[id];
867 assert(ndev->ndev_endpt != NONE);
868 assert(NDEV_ACTIVE(ndev));
870 return (ndev->ndev_recvq.nq_count < ndev->ndev_recvq.nq_max);
874 * Start the process of receiving a packet from a network driver. The packet
875 * will be stored in the given pbuf chain upon completion. Return OK if the
876 * receive request is sent to the driver, EBUSY if the maximum number of
877 * concurrent receive requests has been reached for this driver, or ENOMEM on
878 * grant allocation failure.
881 ndev_recv(ndev_id_t id, struct pbuf * pbuf)
883 struct ndev *ndev;
884 struct ndev_req *nreq;
885 uint32_t seq;
886 int r;
888 assert(id < __arraycount(ndev_array));
889 ndev = &ndev_array[id];
891 assert(ndev->ndev_endpt != NONE);
892 assert(NDEV_ACTIVE(ndev));
894 if ((nreq = ndev_queue_get(&ndev->ndev_recvq, NDEV_RECV,
895 &seq)) == NULL)
896 return EBUSY;
898 if ((r = ndev_transfer(ndev, pbuf, FALSE /*do_send*/, seq,
899 nreq)) != OK)
900 return r;
902 ndev_queue_add(&ndev->ndev_recvq, nreq);
904 return OK;
908 * The network device driver has sent a reply to a receive request.
910 static void
911 ndev_recv_reply(struct ndev * ndev, const message * m_ptr)
915 * Was this the request we were waiting for? If so, remove it from the
916 * receive queue. Otherwise, ignore this reply message.
918 if (!NDEV_ACTIVE(ndev) || !ndev_queue_remove(&ndev->ndev_recvq,
919 NDEV_RECV, m_ptr->m_netdriver_ndev_reply.id))
920 return;
922 /* Tell the ethif layer about the result of the receipt. */
923 assert(ndev->ndev_ethif != NULL);
925 ethif_received(ndev->ndev_ethif,
926 m_ptr->m_netdriver_ndev_reply.result);
/*
 * A network device driver sent a status report to us.  Process it and send a
 * reply.
 */
static void
ndev_status(struct ndev * ndev, const message * m_ptr)
{
	message m;
	int r;

	/* Ignore status updates from drivers that are still initializing. */
	if (!NDEV_ACTIVE(ndev))
		return;

	/* Tell the ethif layer about the status update. */
	assert(ndev->ndev_ethif != NULL);

	ethif_status(ndev->ndev_ethif, m_ptr->m_netdriver_ndev_status.link,
	    m_ptr->m_netdriver_ndev_status.media,
	    m_ptr->m_netdriver_ndev_status.oerror,
	    m_ptr->m_netdriver_ndev_status.coll,
	    m_ptr->m_netdriver_ndev_status.ierror,
	    m_ptr->m_netdriver_ndev_status.iqdrop);

	/*
	 * Send a reply, so that the driver knows it can send a new status
	 * update without risking asynsend queue overflows.  The ID of these
	 * messages is chosen by the driver and we simply echo it.
	 */
	memset(&m, 0, sizeof(m));
	m.m_type = NDEV_STATUS_REPLY;
	m.m_ndev_netdriver_status_reply.id = m_ptr->m_netdriver_ndev_status.id;

	if ((r = asynsend(m_ptr->m_source, &m)) != OK)
		panic("asynsend to driver failed: %d", r);
}
966 * Process a network driver reply message.
968 void
969 ndev_process(const message * m_ptr, int ipc_status)
971 struct ndev *ndev;
972 endpoint_t endpt;
973 ndev_id_t slot;
975 /* Find the slot of the driver that sent the message, if any. */
976 endpt = m_ptr->m_source;
978 for (slot = 0, ndev = ndev_array; slot < ndev_max; slot++, ndev++)
979 if (ndev->ndev_endpt == endpt)
980 break;
983 * If we cannot find a slot for the driver, drop the message. We may
984 * be ignoring the driver because it misbehaved or we are out of slots.
986 if (slot == ndev_max)
987 return;
990 * Process the reply message. For future compatibility, ignore any
991 * unrecognized message types.
993 switch (m_ptr->m_type) {
994 case NDEV_INIT_REPLY:
995 ndev_init_reply(ndev, m_ptr);
997 break;
999 case NDEV_CONF_REPLY:
1000 ndev_conf_reply(ndev, m_ptr);
1002 break;
1004 case NDEV_SEND_REPLY:
1005 ndev_send_reply(ndev, m_ptr);
1007 break;
1009 case NDEV_RECV_REPLY:
1010 ndev_recv_reply(ndev, m_ptr);
1012 break;
1014 case NDEV_STATUS:
1015 ndev_status(ndev, m_ptr);
1017 break;