/*	$NetBSD: maple.c,v 1.39 2008/08/01 20:19:49 marcus Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Marcus Comstedt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Marcus Comstedt.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: maple.c,v 1.39 2008/08/01 20:19:49 marcus Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/sysasicvar.h>

#include <dreamcast/dev/maple/maple.h>
#include <dreamcast/dev/maple/mapleconf.h>
#include <dreamcast/dev/maple/maplevar.h>
#include <dreamcast/dev/maple/maplereg.h>
#include <dreamcast/dev/maple/mapleio.h>
/* Internal macros, functions, and variables. */

#define MAPLE_CALLOUT_TICKS	2

#define MAPLEBUSUNIT(dev)	(minor(dev) >> 5)
#define MAPLEPORT(dev)		((minor(dev) & 0x18) >> 3)
#define MAPLESUBUNIT(dev)	(minor(dev) & 0x7)
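/*
 * Added example, not part of the original source: with this layout a
 * hypothetical minor number 0x0b decodes to bus 0 (0x0b >> 5),
 * port 1 ((0x0b & 0x18) >> 3, i.e. port B) and subunit 3 (0x0b & 0x7).
 */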
/* interrupt priority level */
#define IPL_MAPLE	IPL_BIO
#define splmaple()	splbio()
#define IRL_MAPLE	SYSASIC_IRL9
/*
 * Function declarations.
 */
static int	maplematch(struct device *, struct cfdata *, void *);
static void	mapleattach(struct device *, struct device *, void *);
static void	maple_scanbus(struct maple_softc *);
static char *	maple_unit_name(char *, int port, int subunit);
static void	maple_begin_txbuf(struct maple_softc *);
static int	maple_end_txbuf(struct maple_softc *);
static void	maple_queue_command(struct maple_softc *, struct maple_unit *,
		    int command, int datalen, const void *dataaddr);
static void	maple_write_command(struct maple_softc *, struct maple_unit *,
		    int, int, const void *);
static void	maple_start(struct maple_softc *sc);
static void	maple_start_poll(struct maple_softc *);
static void	maple_check_subunit_change(struct maple_softc *,
		    struct maple_unit *);
static void	maple_check_unit_change(struct maple_softc *,
		    struct maple_unit *);
static void	maple_print_unit(void *, const char *);
static int	maplesubmatch(struct device *, struct cfdata *,
		    const int *, void *);
static int	mapleprint(void *, const char *);
static void	maple_attach_unit(struct maple_softc *, struct maple_unit *);
static void	maple_detach_unit_nofix(struct maple_softc *,
		    struct maple_unit *);
static void	maple_detach_unit(struct maple_softc *, struct maple_unit *);
static void	maple_queue_cmds(struct maple_softc *,
		    struct maple_cmdq_head *);
static void	maple_unit_probe(struct maple_softc *);
static void	maple_unit_ping(struct maple_softc *);
static int	maple_send_defered_periodic(struct maple_softc *);
static void	maple_send_periodic(struct maple_softc *);
static void	maple_remove_from_queues(struct maple_softc *,
		    struct maple_unit *);
static int	maple_retry(struct maple_softc *, struct maple_unit *,
		    enum maple_dma_stat);
static void	maple_queue_retry(struct maple_softc *);
static void	maple_check_responses(struct maple_softc *);
static void	maple_event_thread(void *);
static int	maple_intr(void *);
static void	maple_callout(void *);

int	maple_alloc_dma(size_t, vaddr_t *, paddr_t *);
void	maple_free_dma(paddr_t, size_t);
int	maple_polling;		/* Are we polling?  (Debugger mode) */

CFATTACH_DECL(maple, sizeof(struct maple_softc),
    maplematch, mapleattach, NULL, NULL);

extern struct cfdriver maple_cd;

dev_type_open(mapleopen);
dev_type_close(mapleclose);
dev_type_ioctl(mapleioctl);

const struct cdevsw maple_cdevsw = {
	mapleopen, mapleclose, noread, nowrite, mapleioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};
static int
maplematch(struct device *parent, struct cfdata *cf, void *aux)
static void
mapleattach(struct device *parent, struct device *self, void *aux)
{
	struct maple_softc *sc;
	struct maple_unit *u;
	paddr_t dmabuffer_phys;
	int port, subunit, f;

	sc = (struct maple_softc *)self;

	printf(": %s\n", sysasic_intr_string(IRL_MAPLE));

	if (maple_alloc_dma(MAPLE_DMABUF_SIZE, &dmabuffer, &dmabuffer_phys)) {
		printf("%s: unable to allocate DMA buffers.\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	p = (uint32_t *)dmabuffer;
	for (port = 0; port < MAPLE_PORTS; port++) {
		for (subunit = 0; subunit < MAPLE_SUBUNITS; subunit++) {
			u = &sc->sc_unit[port][subunit];
			u->subunit = subunit;
			u->u_dma_stat = MAPLE_DMA_IDLE;
			u->u_rxbuf_phys = SH3_P2SEG_TO_PHYS(p);
			for (f = 0; f < MAPLE_NFUNC; f++) {
				u->u_func[f].f_funcno = f;
				u->u_func[f].f_unit = u;
			}
		}
	}

	sc->sc_txbuf_phys = SH3_P2SEG_TO_PHYS(p);

	SIMPLEQ_INIT(&sc->sc_retryq);
	TAILQ_INIT(&sc->sc_probeq);
	TAILQ_INIT(&sc->sc_pingq);
	TAILQ_INIT(&sc->sc_periodicq);
	TAILQ_INIT(&sc->sc_periodicdeferq);
	TAILQ_INIT(&sc->sc_acmdq);
	TAILQ_INIT(&sc->sc_pcmdq);

	MAPLE_RESET = RESET_MAGIC;
	MAPLE_SPEED = SPEED_2MBPS | TIMEOUT(50000);
	callout_init(&sc->maple_callout_ch, 0);

	sc->sc_intrhand = sysasic_intr_establish(SYSASIC_EVENT_MAPLE_DMADONE,
	    IPL_MAPLE, IRL_MAPLE, maple_intr, sc);

	config_pending_incr();	/* create thread before mounting root */
	if (kthread_create(PRI_NONE, 0, NULL, maple_event_thread, sc,
	    &sc->event_thread, "%s", sc->sc_dev.dv_xname) == 0)
		return;

	panic("%s: unable to create event thread", sc->sc_dev.dv_xname);
}
/*
 * initial device attach
 */
static void
maple_scanbus(struct maple_softc *sc)
{
	struct maple_unit *u;
	int last_port, last_subunit;

	KASSERT(cold && maple_polling);
	/* probe all ports */
	for (port = 0; port < MAPLE_PORTS; port++) {
		u = &sc->sc_unit[port][0];
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
		printf("%s: queued to probe 1\n",
		    maple_unit_name(buf, u->port, u->subunit));
#endif
		TAILQ_INSERT_TAIL(&sc->sc_probeq, u, u_q);
		u->u_queuestat = MAPLE_QUEUE_PROBE;
	}
	last_port = last_subunit = -1;
	maple_begin_txbuf(sc);
	while ((u = TAILQ_FIRST(&sc->sc_probeq)) != NULL) {
		/*
		 * Check wrap condition
		 */
		if (u->port < last_port || u->subunit <= last_subunit)
			break;
		if (u->port == MAPLE_PORTS - 1)
			last_subunit = u->subunit;

		maple_unit_probe(sc);
		for (i = 10 /* just not forever */; maple_end_txbuf(sc); i--) {
			maple_start_poll(sc);
			maple_check_responses(sc);
			/* attach may issue cmds */
			maple_queue_cmds(sc, &sc->sc_acmdq);
		}
	}
}
void
maple_run_polling(struct device *dev)
{
	struct maple_softc *sc;

	sc = (struct maple_softc *)dev;
	/*
	 * first, make sure polling works
	 */
	while (MAPLE_STATE != 0)	/* XXX may lose a DMA cycle */
		;

	/* XXX this will break internal state */
	for (port = 0; port < MAPLE_PORTS; port++)
		for (subunit = 0; subunit < MAPLE_SUBUNITS; subunit++)
			sc->sc_unit[port][subunit].u_dma_stat = MAPLE_DMA_IDLE;
	SIMPLEQ_INIT(&sc->sc_retryq);	/* XXX discard current retries */
	/*
	 * do polling (periodic status check only)
	 */
	maple_begin_txbuf(sc);
	maple_send_defered_periodic(sc);
	maple_send_periodic(sc);
	for (i = 10 /* just not forever */; maple_end_txbuf(sc); i--) {
		maple_start_poll(sc);
		maple_check_responses(sc);
	}

	/* maple_check_responses() has executed maple_begin_txbuf() */
	maple_queue_retry(sc);
	maple_send_defered_periodic(sc);
static char *
maple_unit_name(char *buf, int port, int subunit)
{

	sprintf(buf, "maple%c", port + 'A');
	if (subunit)
		sprintf(buf + 6, "%d", subunit);

	return buf;
}
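/*
 * Added example, not part of the original source: port 1, subunit 2
 * formats as "mapleB2"; the subunit digit is written at buf[6], right
 * after the port letter.
 */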
int
maple_alloc_dma(size_t size, vaddr_t *vap, paddr_t *pap)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */

	size = round_page(size);

	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,

	m = TAILQ_FIRST(&mlist);
	*pap = VM_PAGE_TO_PHYS(m);
	*vap = SH3_PHYS_TO_P2SEG(VM_PAGE_TO_PHYS(m));
#if 0	/* currently unused */
void
maple_free_dma(paddr_t paddr, size_t size)
{

	for (addr = paddr; addr < paddr + size; addr += PAGE_SIZE) {
		m = PHYS_TO_VM_PAGE(addr);
		TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
	}
	uvm_pglistfree(&mlist);
}
#endif
static void
maple_begin_txbuf(struct maple_softc *sc)
{

	sc->sc_txlink = sc->sc_txpos = sc->sc_txbuf;
	SIMPLEQ_INIT(&sc->sc_dmaq);
}
static int
maple_end_txbuf(struct maple_softc *sc)
{

	/* if no frame has been written, we can't mark the
	   list end, and so the DMA must not be activated */
	if (sc->sc_txpos == sc->sc_txbuf)
		return 0;

	*sc->sc_txlink |= 0x80000000;

	return 1;
}
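/*
 * Added note, not part of the original source: setting the top bit of
 * the last descriptor word written through sc_txlink appears to mark
 * the end of the transmit list for the DMA engine, which is why an
 * empty buffer must never be started.
 */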
static const int8_t subunit_code[] = { 0x20, 0x01, 0x02, 0x04, 0x08, 0x10 };
static void
maple_queue_command(struct maple_softc *sc, struct maple_unit *u,
    int command, int datalen, const void *dataaddr)
{
	uint32_t *p = sc->sc_txpos;

	/* Max data length = 255 longs = 1020 bytes */
	KASSERT(datalen >= 0 && datalen <= 255);

	/* Compute sender and recipient address */
	to = from | subunit_code[u->subunit];

	/* Set length of packet and destination port (A-D) */
	*p++ = datalen | (u->port << 16);

	/* Write address to receive buffer where the response
	   frame should be put */
	*p++ = u->u_rxbuf_phys;

	/* Create the frame header.  The fields are assembled "backwards"
	   because of the Maple Bus big-endianness. */
	*p++ = (command & 0xff) | (to << 8) | (from << 16) | (datalen << 24);
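	/*
	 * Added note, not part of the original source: in the word built
	 * above, bits 0-7 carry the command code, bits 8-15 the recipient
	 * address, bits 16-23 the sender address and bits 24-31 the data
	 * length in longwords, which matches the byte order the bus expects
	 * when this little-endian word is stored by the CPU.
	 */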
	/* Copy parameter data, if any */
	if (datalen > 0) {
		const uint32_t *param = dataaddr;

		for (i = 0; i < datalen; i++)
			*p++ = *param++;
	}

	SIMPLEQ_INSERT_TAIL(&sc->sc_dmaq, u, u_dmaq);
}
static void
maple_write_command(struct maple_softc *sc, struct maple_unit *u, int command,
    int datalen, const void *dataaddr)
{
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	printf("%s: retrycnt %d\n",
	    maple_unit_name(buf, u->port, u->subunit), u->u_retrycnt);
#endif
	u->u_command = command;
	u->u_datalen = datalen;
	u->u_dataaddr = dataaddr;

	maple_queue_command(sc, u, command, datalen, dataaddr);
}
static void
maple_start(struct maple_softc *sc)
{

	MAPLE_DMAADDR = sc->sc_txbuf_phys;
/* start DMA -- wait until DMA done */
static void
maple_start_poll(struct maple_softc *sc)
{

	MAPLE_DMAADDR = sc->sc_txbuf_phys;
	while (MAPLE_STATE != 0)
		;
}
static void
maple_check_subunit_change(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_unit *u1;

	KASSERT(u->subunit == 0);

	unit_map = ((int8_t *) u->u_rxbuf)[2];
	if (sc->sc_port_unit_map[port] == unit_map)
		return;

	units = ((unit_map & 0x1f) << 1) | 1;
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	printf("%s: unit_map 0x%x -> 0x%x (units 0x%x)\n",
	    maple_unit_name(buf, u->port, u->subunit),
	    sc->sc_port_unit_map[port], unit_map, units);
#endif
#if 0	/* this detects unit removal rapidly but is not reliable */
	/* check for unit change */
	un = sc->sc_port_units[port] & ~units;

	/* detach removed devices */
	for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
		maple_detach_unit_nofix(sc, &sc->sc_unit[port][i]);
#endif

	sc->sc_port_unit_map[port] = unit_map;

	/* schedule scanning child devices */
	un = units & ~sc->sc_port_units[port];
	for (i = MAPLE_SUBUNITS - 1; i > 0; i--) {
		u1 = &sc->sc_unit[port][i];
		maple_remove_from_queues(sc, u1);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
		printf("%s: queued to probe 2\n",
		    maple_unit_name(buf, u1->port, u1->subunit));
#endif
		TAILQ_INSERT_HEAD(&sc->sc_probeq, u1, u_q);
		u1->u_queuestat = MAPLE_QUEUE_PROBE;
		u1->u_proberetry = 0;
	}
}
static void
maple_check_unit_change(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_devinfo *newinfo = (void *) (u->u_rxbuf + 1);

	subunit = u->subunit;
	if (memcmp(&u->devinfo, newinfo, sizeof(struct maple_devinfo)) == 0)
		goto out;	/* no change */

	/* attach this device */
	u->devinfo = *newinfo;
	maple_attach_unit(sc, u);

out:
	maple_remove_from_queues(sc, u);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	printf("%s: queued to ping\n",
	    maple_unit_name(buf, u->port, u->subunit));
#endif
	TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
	u->u_queuestat = MAPLE_QUEUE_PING;
}
static void
maple_print_unit(void *aux, const char *pnp)
{
	struct maple_attach_args *ma = aux;

	port = ma->ma_unit->port;
	subunit = ma->ma_unit->subunit;

	printf("%s at %s", maple_unit_name(buf, port, subunit), pnp);

	printf(" port %d", port);

	printf(" subunit %d", subunit);

	printf(": a %#x c %#x fn %#x d %#x,%#x,%#x",
	    ma->ma_devinfo->di_area_code,
	    ma->ma_devinfo->di_connector_direction,
	    be32toh(ma->ma_devinfo->di_func),
	    be32toh(ma->ma_devinfo->di_function_data[0]),
	    be32toh(ma->ma_devinfo->di_function_data[1]),
	    be32toh(ma->ma_devinfo->di_function_data[2]));

	/* nul termination */
	prod = ma->ma_devinfo->di_product_name;
	for (p = prod + sizeof ma->ma_devinfo->di_product_name; p >= prod; p--)
		if (p[-1] != '\0' && p[-1] != ' ')
			break;

	printf(": %s", prod);

	*p = oc;	/* restore */
}
static int
maplesubmatch(struct device *parent, struct cfdata *match,
    const int *ldesc, void *aux)
{
	struct maple_attach_args *ma = aux;

	if (match->cf_loc[MAPLECF_PORT] != MAPLECF_PORT_DEFAULT &&
	    match->cf_loc[MAPLECF_PORT] != ma->ma_unit->port)
		return 0;

	if (match->cf_loc[MAPLECF_SUBUNIT] != MAPLECF_SUBUNIT_DEFAULT &&
	    match->cf_loc[MAPLECF_SUBUNIT] != ma->ma_unit->subunit)
		return 0;

	return config_match(parent, match, aux);
}
static int
mapleprint(void *aux, const char *str)
{
	struct maple_attach_args *ma = aux;

	aprint_normal("%s", str);
	aprint_normal(" function %d", ma->ma_function);

	aprint_normal(" function %d", ma->ma_function);
static void
maple_attach_unit(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_attach_args ma;

	ma.ma_devinfo = &u->devinfo;
	ma.ma_basedevinfo = &sc->sc_unit[u->port][0].devinfo;
	func = be32toh(ma.ma_devinfo->di_func);

	maple_print_unit(&ma, sc->sc_dev.dv_xname);

	strcpy(oldxname, sc->sc_dev.dv_xname);
	maple_unit_name(sc->sc_dev.dv_xname, u->port, u->subunit);

	for (f = 0; f < MAPLE_NFUNC; f++) {
		u->u_func[f].f_callback = NULL;
		u->u_func[f].f_arg = NULL;
		u->u_func[f].f_cmdstat = MAPLE_CMDSTAT_NONE;
		u->u_func[f].f_dev = NULL;
		if (func & MAPLE_FUNC(f)) {
			u->u_func[f].f_dev = config_found_sm_loc(&sc->sc_dev,
			    "maple", NULL, &ma, mapleprint, maplesubmatch);
			u->u_ping_func = f;	/* XXX using largest func */
		}
	}

#ifdef MAPLE_MEMCARD_PING_HACK
	/*
	 * Some 3rd party memory cards pretend to be Visual Memory,
	 * but need special handling for ping.
	 */
	if (func == (MAPLE_FUNC(MAPLE_FN_MEMCARD) | MAPLE_FUNC(MAPLE_FN_LCD) |
	    MAPLE_FUNC(MAPLE_FN_CLOCK))) {
		u->u_ping_func = MAPLE_FN_MEMCARD;
		u->u_ping_stat = MAPLE_PING_MEMCARD;
	} else {
		u->u_ping_stat = MAPLE_PING_NORMAL;
	}
#endif
	strcpy(sc->sc_dev.dv_xname, oldxname);

	sc->sc_port_units[u->port] |= 1 << u->subunit;
}
static void
maple_detach_unit_nofix(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_func *fn;
	struct maple_unit *u1;

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	printf("%s: remove\n", maple_unit_name(buf, u->port, u->subunit));
#endif
	maple_remove_from_queues(sc, u);

	sc->sc_port_units[port] &= ~(1 << u->subunit);

	if (u->subunit == 0) {
		for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
			maple_detach_unit_nofix(sc, &sc->sc_unit[port][i]);
	}

	for (fn = u->u_func; fn < &u->u_func[MAPLE_NFUNC]; fn++) {
		if ((dev = fn->f_dev) != NULL) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
			printf("%s: detaching func %d\n",
			    maple_unit_name(buf, port, u->subunit),
			    fn->f_funcno);
#endif

			/*
			 * Remove functions from command queue.
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC:
			case MAPLE_CMDSTAT_PERIODIC_DEFERED:
				TAILQ_REMOVE(&sc->sc_acmdq, fn, f_cmdq);
				break;
			case MAPLE_CMDSTAT_ASYNC_PERIODICQ:
			case MAPLE_CMDSTAT_PERIODIC:
				TAILQ_REMOVE(&sc->sc_pcmdq, fn, f_cmdq);
				break;
			}

			if ((error = config_detach(fn->f_dev, DETACH_FORCE))) {
				printf("%s: failed to detach %s (func %d), errno %d\n",
				    maple_unit_name(buf, port, u->subunit),
				    fn->f_dev->dv_xname, fn->f_funcno, error);
			}
		}

		maple_enable_periodic(&sc->sc_dev, u, fn->f_funcno, 0);

		fn->f_callback = NULL;
		fn->f_cmdstat = MAPLE_CMDSTAT_NONE;
	}

	if (u->u_dma_stat == MAPLE_DMA_RETRY) {
		SIMPLEQ_FOREACH(u1, &sc->sc_retryq, u_dmaq) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
			printf("%s: abort retry\n",
			    maple_unit_name(buf, port, u->subunit));
#endif
			SIMPLEQ_REMOVE(&sc->sc_retryq, u, maple_unit,
			    u_dmaq);
		}
	}
	u->u_dma_stat = MAPLE_DMA_IDLE;
	/* u->u_dma_func = uninitialized; */
	KASSERT(u->getcond_func_set == 0);
	memset(&u->devinfo, 0, sizeof(struct maple_devinfo));

	if (u->subunit == 0) {
		sc->sc_port_unit_map[port] = 0;
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
		printf("%s: queued to probe 3\n",
		    maple_unit_name(buf2, port, u->subunit));
#endif
		TAILQ_INSERT_TAIL(&sc->sc_probeq, u, u_q);
		u->u_queuestat = MAPLE_QUEUE_PROBE;
	}
}
static void
maple_detach_unit(struct maple_softc *sc, struct maple_unit *u)
{

	maple_detach_unit_nofix(sc, u);
	if (u->subunit != 0)
		sc->sc_port_unit_map[u->port] &= ~(1 << (u->subunit - 1));
}
/*
 * Send a command (called by drivers)
 *
 * The "dataaddr" must not point at temporary storage like stack.
 * Only one command (per function) is valid at a time.
 */
void
maple_command(struct device *dev, struct maple_unit *u, int func,
    int command, int datalen, const void *dataaddr, int flags)
{
	struct maple_softc *sc = (void *) dev;
	struct maple_func *fn;

	KASSERT(func >= 0 && func < 32);
	KASSERT((flags & ~MAPLE_FLAG_CMD_PERIODIC_TIMING) == 0);

	fn = &u->u_func[func];
#if 1 /*def DIAGNOSTIC*/
	if (fn->f_cmdstat != MAPLE_CMDSTAT_NONE)
		panic("maple_command: %s func %d: requesting more than one command",
		    maple_unit_name(buf, u->port, u->subunit), func);
#endif
	fn->f_command = command;
	fn->f_datalen = datalen;
	fn->f_dataaddr = dataaddr;
	if (flags & MAPLE_FLAG_CMD_PERIODIC_TIMING) {
		fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC;
		TAILQ_INSERT_TAIL(&sc->sc_pcmdq, fn, f_cmdq);
	} else {
		fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC;
		TAILQ_INSERT_TAIL(&sc->sc_acmdq, fn, f_cmdq);
		wakeup(&sc->sc_event);	/* wake for async event */
	}
}
static void
maple_queue_cmds(struct maple_softc *sc,
    struct maple_cmdq_head *head)
{
	struct maple_func *fn, *nextfn;
	struct maple_unit *u;

	/*
	 * Note: since the queue element may be queued immediately,
	 * we can't use TAILQ_FOREACH.
	 */
	fn = TAILQ_FIRST(head);
	for ( ; fn; fn = nextfn) {
		nextfn = TAILQ_NEXT(fn, f_cmdq);

		KASSERT(fn->f_cmdstat != MAPLE_CMDSTAT_NONE);
		u = fn->f_unit;
		if (u->u_dma_stat == MAPLE_DMA_IDLE) {
			maple_write_command(sc, u,
			    fn->f_command, fn->f_datalen, fn->f_dataaddr);
			u->u_dma_stat = (fn->f_cmdstat == MAPLE_CMDSTAT_ASYNC ||
			    fn->f_cmdstat == MAPLE_CMDSTAT_ASYNC_PERIODICQ) ?
			    MAPLE_DMA_ACMD : MAPLE_DMA_PCMD;
			u->u_dma_func = fn->f_funcno;
			fn->f_cmdstat = MAPLE_CMDSTAT_NONE;
		} else if (u->u_dma_stat == MAPLE_DMA_RETRY) {
			/* unit is busy --- try again */
			/*
			 * always add to periodic command queue
			 * (wait until the next periodic timing),
			 * since the unit will never be freed until the
			 * next periodic timing.
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC:
				fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC_PERIODICQ;
				break;
			case MAPLE_CMDSTAT_PERIODIC_DEFERED:
				fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC;
				break;
			}
			TAILQ_INSERT_TAIL(&sc->sc_pcmdq, fn, f_cmdq);
		} else {
			/* unit is busy --- try again */
			/*
			 * always add to async command queue
			 * (process immediately)
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC_PERIODICQ:
				fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC;
				break;
			case MAPLE_CMDSTAT_PERIODIC:
				fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC_DEFERED;
				break;
			}
			TAILQ_INSERT_TAIL(&sc->sc_acmdq, fn, f_cmdq);
		}
	}
}
/* schedule probing a device */
static void
maple_unit_probe(struct maple_softc *sc)
{
	struct maple_unit *u;

	if ((u = TAILQ_FIRST(&sc->sc_probeq)) != NULL) {
		KASSERT(u->u_dma_stat == MAPLE_DMA_IDLE);
		KASSERT(u->u_queuestat == MAPLE_QUEUE_PROBE);
		maple_remove_from_queues(sc, u);
		maple_write_command(sc, u, MAPLE_COMMAND_DEVINFO, 0, NULL);
		u->u_dma_stat = MAPLE_DMA_PROBE;
		/* u->u_dma_func = ignored; */
	}
}
/*
 * Enable/disable unit pinging (called by drivers)
 */
void
maple_enable_unit_ping(struct device *dev, struct maple_unit *u,
    int func, int enable)
{
#if 0	/* currently unused */
	struct maple_softc *sc = (void *) dev;
#endif

	if (enable)
		u->u_noping &= ~MAPLE_FUNC(func);
	else
		u->u_noping |= MAPLE_FUNC(func);
}
/* schedule pinging a device */
static void
maple_unit_ping(struct maple_softc *sc)
{
	struct maple_unit *u;
	struct maple_func *fn;
#ifdef MAPLE_MEMCARD_PING_HACK
	static const uint32_t memcard_ping_arg[2] = {
		0x02000000,	/* htobe32(MAPLE_FUNC(MAPLE_FN_MEMCARD)) */
		0		/* pt (1 byte) and unused 3 bytes */
	};
#endif

	if ((u = TAILQ_FIRST(&sc->sc_pingq)) != NULL) {
		KASSERT(u->u_queuestat == MAPLE_QUEUE_PING);
		maple_remove_from_queues(sc, u);
		if (u->u_dma_stat == MAPLE_DMA_IDLE && u->u_noping == 0) {
#ifdef MAPLE_MEMCARD_PING_HACK
			if (u->u_ping_stat == MAPLE_PING_MINFO) {
				/* use MINFO for some memory cards */
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETMINFO,
				    2, memcard_ping_arg);
			} else
#endif
			{
				fn = &u->u_func[u->u_ping_func];
				fn->f_work = htobe32(MAPLE_FUNC(u->u_ping_func));
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETCOND,
				    1, &fn->f_work);
			}
			u->u_dma_stat = MAPLE_DMA_PING;
			/* u->u_dma_func = XXX; */
		} else {
			/* no need if periodic */
			TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
			u->u_queuestat = MAPLE_QUEUE_PING;
		}
	}
}
/*
 * Enable/disable periodic GETCOND (called by drivers)
 */
void
maple_enable_periodic(struct device *dev, struct maple_unit *u,
    int func, int enable)
{
	struct maple_softc *sc = (void *) dev;
	struct maple_func *fn;

	KASSERT(func >= 0 && func < 32);

	fn = &u->u_func[func];

	if (enable) {
		if (fn->f_periodic_stat == MAPLE_PERIODIC_NONE) {
			TAILQ_INSERT_TAIL(&sc->sc_periodicq, fn, f_periodicq);
			fn->f_periodic_stat = MAPLE_PERIODIC_INQ;
			u->getcond_func_set |= MAPLE_FUNC(func);
		}
	} else {
		if (fn->f_periodic_stat == MAPLE_PERIODIC_INQ)
			TAILQ_REMOVE(&sc->sc_periodicq, fn, f_periodicq);
		else if (fn->f_periodic_stat == MAPLE_PERIODIC_DEFERED)
			TAILQ_REMOVE(&sc->sc_periodicdeferq, fn, f_periodicq);
		fn->f_periodic_stat = MAPLE_PERIODIC_NONE;
		u->getcond_func_set &= ~MAPLE_FUNC(func);
	}
}
/*
 * queue periodic GETCOND
 */
static int
maple_send_defered_periodic(struct maple_softc *sc)
{
	struct maple_unit *u;
	struct maple_func *fn, *nextfn;
	int defer_remain = 0;

	for (fn = TAILQ_FIRST(&sc->sc_periodicdeferq); fn; fn = nextfn) {
		KASSERT(fn->f_periodic_stat == MAPLE_PERIODIC_DEFERED);

		nextfn = TAILQ_NEXT(fn, f_periodicq);

		u = fn->f_unit;
		if (u->u_dma_stat == MAPLE_DMA_IDLE ||
		    u->u_dma_stat == MAPLE_DMA_RETRY) {
			/*
			 * if IDLE  -> queue this request
			 * if RETRY -> the unit will never be freed until the
			 *             next periodic timing, so just restore to
			 *             the normal periodic queue.
			 */
			TAILQ_REMOVE(&sc->sc_periodicdeferq, fn, f_periodicq);
			TAILQ_INSERT_TAIL(&sc->sc_periodicq, fn, f_periodicq);
			fn->f_periodic_stat = MAPLE_PERIODIC_INQ;

			if (u->u_dma_stat == MAPLE_DMA_IDLE) {
				/*
				 * queue periodic command
				 */
				fn->f_work = htobe32(MAPLE_FUNC(fn->f_funcno));
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETCOND, 1, &fn->f_work);
				u->u_dma_stat = MAPLE_DMA_PERIODIC;
				u->u_dma_func = fn->f_funcno;
			}
		} else {
			defer_remain = 1;
		}
	}

	return defer_remain;
}
*sc
)
1080 struct maple_unit
*u
;
1081 struct maple_func
*fn
, *nextfn
;
1083 for (fn
= TAILQ_FIRST(&sc
->sc_periodicq
); fn
; fn
= nextfn
) {
1084 KASSERT(fn
->f_periodic_stat
== MAPLE_PERIODIC_INQ
);
1086 nextfn
= TAILQ_NEXT(fn
, f_periodicq
);
1089 if (u
->u_dma_stat
!= MAPLE_DMA_IDLE
) {
1090 if (u
->u_dma_stat
!= MAPLE_DMA_RETRY
) {
1092 * can't be queued --- move to defered queue
1094 TAILQ_REMOVE(&sc
->sc_periodicq
, fn
,
1096 TAILQ_INSERT_TAIL(&sc
->sc_periodicdeferq
, fn
,
1098 fn
->f_periodic_stat
= MAPLE_PERIODIC_DEFERED
;
1102 * queue periodic command
1104 fn
->f_work
= htobe32(MAPLE_FUNC(fn
->f_funcno
));
1105 maple_write_command(sc
, u
, MAPLE_COMMAND_GETCOND
,
1107 u
->u_dma_stat
= MAPLE_DMA_PERIODIC
;
1108 u
->u_dma_func
= fn
->f_funcno
;
static void
maple_remove_from_queues(struct maple_softc *sc, struct maple_unit *u)
{

	/* remove from queues */
	if (u->u_queuestat == MAPLE_QUEUE_PROBE)
		TAILQ_REMOVE(&sc->sc_probeq, u, u_q);
	else if (u->u_queuestat == MAPLE_QUEUE_PING)
		TAILQ_REMOVE(&sc->sc_pingq, u, u_q);
	else if (u->u_queuestat != MAPLE_QUEUE_NONE)
		panic("maple_remove_from_queues: queuestat %d", u->u_queuestat);

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	if (u->u_queuestat != MAPLE_QUEUE_NONE) {
		printf("%s: dequeued\n",
		    maple_unit_name(buf, u->port, u->subunit));
	}
#endif
	u->u_queuestat = MAPLE_QUEUE_NONE;
}
/*
 * retry current command at next periodic timing
 */
static int
maple_retry(struct maple_softc *sc, struct maple_unit *u,
    enum maple_dma_stat st)
{

	KASSERT(st != MAPLE_DMA_IDLE && st != MAPLE_DMA_RETRY);

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	if (u->u_retrycnt == 0) {
		printf("%s: retrying: %#x, %#x, %p\n",
		    maple_unit_name(buf, u->port, u->subunit),
		    u->u_command, u->u_datalen, u->u_dataaddr);
	}
#endif
	if (u->u_retrycnt >= MAPLE_RETRY_MAX)
		return 1;

	u->u_saved_dma_stat = st;
	u->u_dma_stat = MAPLE_DMA_RETRY;  /* no new command before retry done */
	SIMPLEQ_INSERT_TAIL(&sc->sc_retryq, u, u_dmaq);

	return 0;
}
static void
maple_queue_retry(struct maple_softc *sc)
{
	struct maple_unit *u, *nextu;

	/*
	 * Note: since the queue element is queued immediately
	 * in maple_queue_command, we can't use SIMPLEQ_FOREACH.
	 */
	for (u = SIMPLEQ_FIRST(&sc->sc_retryq); u; u = nextu) {
		nextu = SIMPLEQ_NEXT(u, u_dmaq);

		/*
		 * Retrying is in the highest priority, and the unit shall
		 * be in the RETRY state here.
		 */
		KASSERT(u->u_dma_stat == MAPLE_DMA_RETRY);
		maple_queue_command(sc, u, u->u_command, u->u_datalen,
		    u->u_dataaddr);
		u->u_dma_stat = u->u_saved_dma_stat;
		KASSERT(u->u_saved_dma_stat != MAPLE_DMA_IDLE);
		u->u_saved_dma_stat = MAPLE_DMA_IDLE;
	}
	SIMPLEQ_INIT(&sc->sc_retryq);
}
/*
 * Process DMA results.
 * Requires kernel context.
 */
static void
maple_check_responses(struct maple_softc *sc)
{
	struct maple_unit *u, *nextu;
	struct maple_func *fn;
	maple_response_t response;

	/*
	 * Note: since the queue element may be queued immediately,
	 * we can't use SIMPLEQ_FOREACH.
	 */
	for (u = SIMPLEQ_FIRST(&sc->sc_dmaq), maple_begin_txbuf(sc);
	    u; u = nextu) {
		nextu = SIMPLEQ_NEXT(u, u_dmaq);

		if (u->u_dma_stat == MAPLE_DMA_IDLE)
			continue;	/* just detached or DDB was active */

		/*
		 * check for retransmission
		 */
		if ((response = u->u_rxbuf[0]) == MAPLE_RESPONSE_AGAIN) {
			if (maple_retry(sc, u, u->u_dma_stat) == 0)
				continue;
			/* else pass error to upper layer */
		}

		len = (u->u_rxbuf[0] >> 24);	/* length in longwords */
		len <<= 2;			/* length in bytes */
		if (u->u_dma_stat == MAPLE_DMA_PERIODIC) {
			u->u_dma_stat = MAPLE_DMA_IDLE;
			func_code = u->u_dma_func;
			if (response == MAPLE_RESPONSE_DATATRF && len > 0 &&
			    be32toh(u->u_rxbuf[1]) == MAPLE_FUNC(func_code)) {
				fn = &u->u_func[func_code];
				if (fn->f_callback != NULL)
					(*fn->f_callback)(fn->f_arg,
					    (void *)u->u_rxbuf, len,
					    MAPLE_FLAG_PERIODIC);
			} else if (response == MAPLE_RESPONSE_NONE) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
				printf("%s: func: %d: periodic response %d\n",
				    maple_unit_name(buf, u->port, u->subunit),
				    func_code, response);
#endif
				/*
				 * Some 3rd party devices sometimes
				 * do not respond.
				 */
				if (maple_retry(sc, u, MAPLE_DMA_PERIODIC))
					maple_detach_unit(sc, u);
			}
			/* XXX check unexpected conditions? */
		} else if (u->u_dma_stat == MAPLE_DMA_PROBE) {
			KASSERT(u->u_queuestat == MAPLE_QUEUE_NONE);
			u->u_dma_stat = MAPLE_DMA_IDLE;
			switch (response) {
			case MAPLE_RESPONSE_NONE:
				/*
				 * Do not use maple_retry(), which conflicts
				 * with probe structure.
				 */
				if (u->subunit != 0 &&
				    ++u->u_proberetry > MAPLE_PROBERETRY_MAX) {
					printf("%s: no response\n",
					    maple_unit_name(buf,
					    u->port, u->subunit));
				} else {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
					printf("%s: queued to probe 4\n",
					    maple_unit_name(buf, u->port, u->subunit));
#endif
					TAILQ_INSERT_TAIL(&sc->sc_probeq, u,
					    u_q);
					u->u_queuestat = MAPLE_QUEUE_PROBE;
				}
				break;
			case MAPLE_RESPONSE_DEVINFO:
				/* check if the unit is changed */
				maple_check_unit_change(sc, u);
				break;
			}
		} else if (u->u_dma_stat == MAPLE_DMA_PING) {
			KASSERT(u->u_queuestat == MAPLE_QUEUE_NONE);
			u->u_dma_stat = MAPLE_DMA_IDLE;
			switch (response) {
			case MAPLE_RESPONSE_NONE:
				/*
				 * Some 3rd party devices sometimes
				 * do not respond.
				 */
				if (maple_retry(sc, u, MAPLE_DMA_PING)) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
					printf("%s: ping response %d\n",
					    maple_unit_name(buf, u->port,
					    u->subunit), response);
#endif
#ifdef MAPLE_MEMCARD_PING_HACK
					if (u->u_ping_stat
					    == MAPLE_PING_MEMCARD) {
						/*
						 * The unit claims itself to be
						 * a Visual Memory, and has
						 * never responded to GETCOND.
						 * Try again using MINFO, in
						 * case it is a poorly
						 * implemented 3rd party card.
						 */
						printf("%s: switching ping method\n",
						    maple_unit_name(buf,
						    u->port, u->subunit));
						u->u_ping_stat =
						    MAPLE_PING_MINFO;
						TAILQ_INSERT_TAIL(&sc->sc_pingq,
						    u, u_q);
						u->u_queuestat =
						    MAPLE_QUEUE_PING;
					} else
#endif	/* MAPLE_MEMCARD_PING_HACK */
					maple_detach_unit(sc, u);
				}
				break;
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
			case MAPLE_RESPONSE_DATATRF:
				TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
				u->u_queuestat = MAPLE_QUEUE_PING;
#ifdef MAPLE_MEMCARD_PING_HACK
				/*
				 * If the unit responds to GETCOND, it is a
				 * normal implementation.
				 */
				if (u->u_ping_stat == MAPLE_PING_MEMCARD)
					u->u_ping_stat = MAPLE_PING_NORMAL;
		} else {
			/*
			 * Note: Do not rely on the consistency of responses.
			 */

			if (response == MAPLE_RESPONSE_NONE) {
				if (maple_retry(sc, u, u->u_dma_stat)) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
					printf("%s: command response %d\n",
					    maple_unit_name(buf, u->port,
					    u->subunit), response);
#endif
					maple_detach_unit(sc, u);
				}
				continue;
			}

			flags = (u->u_dma_stat == MAPLE_DMA_PCMD) ?
			    MAPLE_FLAG_CMD_PERIODIC_TIMING : 0;
			u->u_dma_stat = MAPLE_DMA_IDLE;

			func_code = u->u_dma_func;
			fn = &u->u_func[func_code];
			if (fn->f_dev == NULL) {
				/* detached right now */
				printf("%s: unknown function: function %d, response %d\n",
				    maple_unit_name(buf, u->port, u->subunit),
				    func_code, response);
			}

			if (fn->f_callback != NULL) {
				(*fn->f_callback)(fn->f_arg,
				    (void *)u->u_rxbuf, len, flags);
			}
		}

		/*
		 * check for subunit change and schedule probing subunits
		 */
		if (u->subunit == 0 && response != MAPLE_RESPONSE_NONE &&
		    response != MAPLE_RESPONSE_AGAIN &&
		    ((int8_t *) u->u_rxbuf)[2] != sc->sc_port_unit_map[u->port])
			maple_check_subunit_change(sc, u);
	}
}
/*
 * Main Maple Bus thread
 */
static void
maple_event_thread(void *arg)
{
	struct maple_softc *sc = arg;
	unsigned cnt = 1;	/* timing counter */

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	printf("%s: forked event thread, pid %d\n",
	    sc->sc_dev.dv_xname, sc->event_thread->l_proc->p_pid);
#endif

	/* begin first DMA cycle */
	maple_begin_txbuf(sc);

	/* OK, continue booting system */
	config_pending_decr();

	for (;;) {
		/* queue async commands */
		if (!TAILQ_EMPTY(&sc->sc_acmdq))
			maple_queue_cmds(sc, &sc->sc_acmdq);

		/* send defered periodic command */
		if (!TAILQ_EMPTY(&sc->sc_periodicdeferq))
			maple_send_defered_periodic(sc);

		/* queue periodic commands */
		/* queue commands on periodic timing */
		if (!TAILQ_EMPTY(&sc->sc_pcmdq))
			maple_queue_cmds(sc, &sc->sc_pcmdq);

		if (!SIMPLEQ_EMPTY(&sc->sc_retryq))
			maple_queue_retry(sc);

		if ((cnt & 31) == 0)	/* XXX */
			maple_unit_probe(sc);

		maple_send_periodic(sc);
		if ((cnt & 7) == 0)	/* XXX */
			maple_unit_ping(sc);

		/*
		 * schedule periodic event
		 */
		callout_reset(&sc->maple_callout_ch,
		    MAPLE_CALLOUT_TICKS, maple_callout, sc);

		if (maple_end_txbuf(sc)) {
			/*
			 * wait until DMA done
			 */
			if (tsleep(&sc->sc_dmadone, PWAIT, "mdma", hz)
			    == EWOULDBLOCK)
				/* was DDB active? */
				printf("%s: timed out\n", sc->sc_dev.dv_xname);

			maple_check_responses(sc);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
#endif
		}

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
		/* weird if occurs in succession */
#if MAPLE_DEBUG <= 2
		if (noreq)	/* ignore first time */
#endif
			printf("%s: no request %d\n",
			    sc->sc_dev.dv_xname, noreq);
#endif

		if (TAILQ_EMPTY(&sc->sc_acmdq) && sc->sc_event == 0 &&
		    TAILQ_EMPTY(&sc->sc_periodicdeferq)) {
			if (tsleep(&sc->sc_event, PWAIT, "mslp", hz)
			    == EWOULDBLOCK)
				printf("%s: event timed out\n",
				    sc->sc_dev.dv_xname);
		}

#if 0	/* maple root device can't be detached */
#endif
	}
}
static int
maple_intr(void *arg)
{
	struct maple_softc *sc = arg;

	wakeup(&sc->sc_dmadone);

	return 1;
}
static void
maple_callout(void *ctx)
{
	struct maple_softc *sc = ctx;

	sc->sc_event = 1;	/* mark as periodic event */
	wakeup(&sc->sc_event);
}
/*
 * Install callback handler (called by drivers)
 */
void
maple_set_callback(struct device *dev, struct maple_unit *u, int func,
    void (*callback)(void *, struct maple_response *, int, int), void *arg)
{
#if 0	/* currently unused */
	struct maple_softc *sc = (void *) dev;
#endif
	struct maple_func *fn;

	KASSERT(func >= 0 && func < MAPLE_NFUNC);

	fn = &u->u_func[func];

	fn->f_callback = callback;
	fn->f_arg = arg;
}
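#if 0
/*
 * Added illustrative sketch, not part of the original source: a child
 * driver would typically register its response callback at attach time
 * and then enable the periodic GETCOND for its function, roughly like
 * this.  "example_softc" and "example_intr" are hypothetical names.
 */
struct example_softc;
static void example_intr(void *, struct maple_response *, int, int);

static void
example_register(struct example_softc *esc, struct device *parent,
    struct maple_unit *u, int funcno)
{

	maple_set_callback(parent, u, funcno, example_intr, esc);
	maple_enable_periodic(parent, u, funcno, 1);
}
#endif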
/*
 * Return function definition data (called by drivers)
 */
uint32_t
maple_get_function_data(struct maple_devinfo *devinfo, int function_code)
{

	func = be32toh(devinfo->di_func);
	for (i = 31; i >= 0; --i)
		if (func & MAPLE_FUNC(i)) {
			if (function_code == i)
				return be32toh(devinfo->di_function_data[p]);
			else
				p++;
		}

	return 0;
}
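#if 0
/*
 * Added illustrative sketch, not part of the original source: a child
 * driver can pull the packed function-definition word for its function
 * out of the probe results roughly like this.
 */
static uint32_t
example_memcard_funcdef(struct maple_attach_args *ma)
{

	return maple_get_function_data(ma->ma_devinfo, MAPLE_FN_MEMCARD);
}
#endif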
/* Generic maple device interface */

int
mapleopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct maple_softc *sc;

	sc = device_lookup_private(&maple_cd, MAPLEBUSUNIT(dev));
	if (sc == NULL)			/* make sure it was attached */
		return ENXIO;

	if (MAPLEPORT(dev) >= MAPLE_PORTS)
		return ENXIO;

	if (MAPLESUBUNIT(dev) >= MAPLE_SUBUNITS)
		return ENXIO;

	if (!(sc->sc_port_units[MAPLEPORT(dev)] & (1 << MAPLESUBUNIT(dev))))
		return ENXIO;

	sc->sc_port_units_open[MAPLEPORT(dev)] |= 1 << MAPLESUBUNIT(dev);

	return 0;
}
int
mapleclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct maple_softc *sc;

	sc = device_lookup_private(&maple_cd, MAPLEBUSUNIT(dev));

	sc->sc_port_units_open[MAPLEPORT(dev)] &= ~(1 << MAPLESUBUNIT(dev));

	return 0;
}
int
maple_unit_ioctl(struct device *dev, struct maple_unit *u, u_long cmd,
    void *data, int flag, struct lwp *l)
{
	struct maple_softc *sc = (struct maple_softc *)dev;

	if (!(sc->sc_port_units[u->port] & (1 << u->subunit)))
		return ENXIO;

	switch (cmd) {
	case MAPLEIO_GDEVINFO:
		memcpy(data, &u->devinfo, sizeof(struct maple_devinfo));
		break;
	default:
		return EPASSTHROUGH;
	}

	return 0;
}
int
mapleioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct maple_softc *sc;
	struct maple_unit *u;

	sc = device_lookup_private(&maple_cd, MAPLEBUSUNIT(dev));
	u = &sc->sc_unit[MAPLEPORT(dev)][MAPLESUBUNIT(dev)];

	return maple_unit_ioctl(&sc->sc_dev, u, cmd, data, flag, l);
}
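#if 0
/*
 * Added userland-side illustration, not part of the original source:
 * the MAPLEIO_GDEVINFO ioctl handled above can be exercised roughly like
 * this.  The device node name is a placeholder; use whatever node was
 * created for the unit of interest.
 */
#include <sys/ioctl.h>
#include <dreamcast/dev/maple/maple.h>
#include <dreamcast/dev/maple/mapleio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct maple_devinfo di;
	int fd;

	fd = open("/dev/mapleA0", O_RDONLY);	/* placeholder node name */
	if (fd == -1)
		return 1;
	if (ioctl(fd, MAPLEIO_GDEVINFO, &di) == 0)
		printf("product: %.*s\n",
		    (int)sizeof(di.di_product_name), di.di_product_name);
	close(fd);
	return 0;
}
#endif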