Sync usage with man page.
[netbsd-mini2440.git] / sys / arch / acorn26 / iobus / ioc.c
bloba836337916e4bd2edb3beff2fdbc817754ffe95d
1 /* $NetBSD: ioc.c,v 1.18 2009/01/18 20:31:08 bjh21 Exp $ */
3 /*-
4 * Copyright (c) 1998, 1999, 2000 Ben Harris
5 * All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * ioc.c - Acorn/ARM I/O Controller (Albion/VC2311/VL2311/VY86C410)
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: ioc.c,v 1.18 2009/01/18 20:31:08 bjh21 Exp $");
36 #include <sys/param.h>
37 #include <sys/device.h>
38 #include <sys/kernel.h>
39 #include <sys/queue.h>
40 #include <sys/reboot.h> /* For bootverbose */
41 #include <sys/systm.h>
42 #include <sys/timetc.h>
44 #include <machine/bus.h>
45 #include <machine/intr.h>
46 #include <machine/irq.h>
48 #include <arch/acorn26/acorn26/cpuvar.h>
49 #include <arch/acorn26/iobus/iobusvar.h>
50 #include <arch/acorn26/iobus/iocvar.h>
51 #include <arch/acorn26/iobus/iocreg.h>
53 #include "locators.h"
/* autoconf(9) glue */
static int ioc_match(device_t parent, cfdata_t cf, void *aux);
static void ioc_attach(device_t parent, device_t self, void *aux);
static int ioc_search(device_t parent, cfdata_t cf,
    const int *ldesc, void *aux);
static int ioc_print(void *aux, const char *pnp);
/* Interrupt handlers for the hardclock (timer 0) and statclock (timer 1). */
static int ioc_irq_clock(void *cookie);
static int ioc_irq_statclock(void *cookie);
/* timecounter(9) backend reading timer 0. */
static u_int ioc_get_timecount(struct timecounter *);

CFATTACH_DECL_NEW(ioc, sizeof(struct ioc_softc),
    ioc_match, ioc_attach, NULL, NULL);

/*
 * Every known machine has exactly one IOC; this points at its device
 * once attached (see ioc_match/ioc_attach below).
 */
device_t the_ioc;
70 * Autoconfiguration glue
73 static int
74 ioc_match(device_t parent, cfdata_t cf, void *aux)
78 * This is tricky. Accessing non-existent devices in iobus
79 * space can hang the machine (MEMC datasheet section 5.3.3),
80 * so probes would have to be very delicate. This isn't
81 * _much_ of a problem with the IOC, since all machines I know
82 * of have exactly one.
84 if (the_ioc == NULL)
85 return 1;
86 return 0;
89 static void
90 ioc_attach(device_t parent, device_t self, void *aux)
92 struct ioc_softc *sc = device_private(self);
93 struct iobus_attach_args *ioa = aux;
94 bus_space_tag_t bst;
95 bus_space_handle_t bsh;
97 sc->sc_dev = the_ioc = self;
98 sc->sc_bst = ioa->ioa_tag;
99 if (bus_space_map(ioa->ioa_tag, ioa->ioa_base, 0x00200000,
100 0, &(sc->sc_bsh)) != 0)
101 panic("%s: couldn't map", device_xname(self));
102 bst = sc->sc_bst;
103 bsh = sc->sc_bsh;
104 /* Now we need to set up bits of the IOC */
105 /* Control register: All bits high (input) is probably safe */
106 ioc_ctl_write(self, 0xff, 0xff);
108 * IRQ/FIQ: mask out all, leave clearing latched interrupts
109 * till someone asks.
111 ioc_irq_setmask(0);
112 ioc_fiq_setmask(0);
114 * Timers:
115 * Timers 0/1 are set up by ioc_initclocks (called by cpu_initclocks).
116 * XXX What if we need timers before then?
117 * Timer 2 is set up by whatever's connected to BAUD.
118 * Timer 3 is set up by the arckbd driver.
120 aprint_normal("\n");
122 config_search_ia(ioc_search, self, "ioc", NULL);
125 extern struct bus_space ioc_bs_tag;
127 static int
128 ioc_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
130 struct ioc_softc *sc = device_private(parent);
131 struct ioc_attach_args ioc;
132 bus_space_tag_t bst = sc->sc_bst;
133 bus_space_handle_t bsh = sc->sc_bsh;
135 ioc.ioc_bank = cf->cf_loc[IOCCF_BANK];
136 ioc.ioc_offset = cf->cf_loc[IOCCF_OFFSET];
137 ioc.ioc_slow_t = bst;
138 bus_space_subregion(bst, bsh, (ioc.ioc_bank << IOC_BANK_SHIFT)
139 + (IOC_TYPE_SLOW << IOC_TYPE_SHIFT)
140 + (ioc.ioc_offset >> 2),
141 1 << IOC_BANK_SHIFT, &ioc.ioc_slow_h);
142 ioc.ioc_medium_t = bst;
143 bus_space_subregion(bst, bsh, (ioc.ioc_bank << IOC_BANK_SHIFT)
144 + (IOC_TYPE_MEDIUM << IOC_TYPE_SHIFT)
145 + (ioc.ioc_offset >> 2),
146 1 << IOC_BANK_SHIFT, &ioc.ioc_medium_h);
147 ioc.ioc_fast_t = bst;
148 bus_space_subregion(bst, bsh, (ioc.ioc_bank << IOC_BANK_SHIFT)
149 + (IOC_TYPE_FAST << IOC_TYPE_SHIFT)
150 + (ioc.ioc_offset >> 2),
151 1 << IOC_BANK_SHIFT, &ioc.ioc_fast_h);
152 ioc.ioc_sync_t = bst;
153 bus_space_subregion(bst, bsh, (ioc.ioc_bank << IOC_BANK_SHIFT)
154 + (IOC_TYPE_SYNC << IOC_TYPE_SHIFT)
155 + (ioc.ioc_offset >> 2),
156 1 << IOC_BANK_SHIFT, &ioc.ioc_sync_h);
157 if (config_match(parent, cf, &ioc) > 0)
158 config_attach(parent, cf, &ioc, ioc_print);
160 return 0;
163 static int
164 ioc_print(void *aux, const char *pnp)
166 struct ioc_attach_args *ioc = aux;
168 if (ioc->ioc_bank != IOCCF_BANK_DEFAULT)
169 aprint_normal(" bank %d", ioc->ioc_bank);
170 if (ioc->ioc_offset != IOCCF_OFFSET_DEFAULT)
171 aprint_normal(" offset 0x%02x", ioc->ioc_offset);
172 return UNCONF;
176 * Find out if an interrupt line is currently active
180 ioc_irq_status(int irq)
182 struct ioc_softc *sc = device_private(the_ioc);
183 bus_space_tag_t bst = sc->sc_bst;
184 bus_space_handle_t bsh = sc->sc_bsh;
186 if (irq < 8)
187 return (bus_space_read_1(bst, bsh, IOC_IRQSTA) &
188 IOC_IRQA_BIT(irq)) != 0;
189 else
190 return (bus_space_read_1(bst, bsh, IOC_IRQSTB) &
191 IOC_IRQB_BIT(irq)) != 0;
194 u_int32_t
195 ioc_irq_status_full(void)
197 struct ioc_softc *sc = device_private(the_ioc);
198 bus_space_tag_t bst = sc->sc_bst;
199 bus_space_handle_t bsh = sc->sc_bsh;
201 #if 0 /* XXX */
202 printf("IRQ mask: 0x%x\n",
203 bus_space_read_1(bst, bsh, IOC_IRQMSKA) |
204 (bus_space_read_1(bst, bsh, IOC_IRQMSKB) << 8));
205 #endif
206 return bus_space_read_1(bst, bsh, IOC_IRQRQA) |
207 (bus_space_read_1(bst, bsh, IOC_IRQRQB) << 8);
210 void
211 ioc_irq_setmask(u_int32_t mask)
213 struct ioc_softc *sc = device_private(the_ioc);
214 bus_space_tag_t bst = sc->sc_bst;
215 bus_space_handle_t bsh = sc->sc_bsh;
217 bus_space_write_1(bst, bsh, IOC_IRQMSKA, mask & 0xff);
218 bus_space_write_1(bst, bsh, IOC_IRQMSKB, (mask >> 8) & 0xff);
/*
 * Busy-wait until the given interrupt line becomes active.
 */
void
ioc_irq_waitfor(int irq)
{

	while (!ioc_irq_status(irq))
		continue;
}
228 void
229 ioc_irq_clear(int mask)
231 struct ioc_softc *sc = device_private(the_ioc);
232 bus_space_tag_t bst = sc->sc_bst;
233 bus_space_handle_t bsh = sc->sc_bsh;
235 bus_space_write_1(bst, bsh, IOC_IRQRQA, mask);
#if 0

/*
 * ioc_get_irq_level:
 *
 * Find out the current level of an edge-triggered interrupt line.
 * Useful for the VIDC driver to know if it's in VSYNC if nothing
 * else.
 *
 * NOTE: currently compiled out; kept for reference.
 */
int
ioc_get_irq_level(device_t self, int irq)
{
	struct ioc_softc *sc = device_private(self);

	switch (irq) {
	case IOC_IRQ_IF:
		return (bus_space_read_1(sc->sc_bst, sc->sc_bsh, IOC_CTL) &
		    IOC_CTL_NIF) != 0;
	case IOC_IRQ_IR:
		return (bus_space_read_1(sc->sc_bst, sc->sc_bsh, IOC_CTL) &
		    IOC_CTL_IR) != 0;
	}
	panic("ioc_get_irq_level called for irq %d, which isn't edge-triggered",
	    irq);
}
#endif /* 0 */
267 * FIQs
270 void
271 ioc_fiq_setmask(u_int32_t mask)
273 struct ioc_softc *sc = device_private(the_ioc);
274 bus_space_tag_t bst = sc->sc_bst;
275 bus_space_handle_t bsh = sc->sc_bsh;
277 bus_space_write_1(bst, bsh, IOC_FIQMSK, mask);
283 * Counters
286 void ioc_counter_start(device_t self, int counter, int value)
288 struct ioc_softc *sc = device_private(self);
289 bus_space_tag_t bst = sc->sc_bst;
290 bus_space_handle_t bsh = sc->sc_bsh;
291 int tlow, thigh, tgo;
293 switch (counter) {
294 case 0: tlow = IOC_T0LOW; thigh = IOC_T0HIGH; tgo = IOC_T0GO; break;
295 case 1: tlow = IOC_T1LOW; thigh = IOC_T1HIGH; tgo = IOC_T1GO; break;
296 case 2: tlow = IOC_T2LOW; thigh = IOC_T2HIGH; tgo = IOC_T2GO; break;
297 case 3: tlow = IOC_T3LOW; thigh = IOC_T3HIGH; tgo = IOC_T3GO; break;
298 default: panic("%s: ioc_counter_start: bad counter (%d)",
299 device_xname(self), counter);
301 bus_space_write_1(bst, bsh, tlow, value & 0xff);
302 bus_space_write_1(bst, bsh, thigh, value >> 8 & 0xff);
303 bus_space_write_1(bst, bsh, tgo, 0);
/* Cache to save microtime recalculating it */
static int t0_count;

/*
 * Statistics clock interval and variance, in ticks.  Variance must be a
 * power of two.  Since this gives us an even number, not an odd number,
 * we discard one case and compensate.  That is, a variance of 1024 would
 * give us offsets in [0..1023].  Instead, we take offsets in [1..1023].
 * This is symmetric about the point 512, or statvar/2, and thus averages
 * to that value (assuming uniform random numbers).
 */
int statvar = 8192;
int statmin;	/* minimum statclock period, computed in cpu_initclocks() */
319 void
320 cpu_initclocks(void)
322 struct ioc_softc *sc;
323 int minint, statint;
325 KASSERT(the_ioc != NULL);
326 sc = device_private(the_ioc);
327 stathz = hz; /* XXX what _should_ it be? */
329 if (hz == 0 || IOC_TIMER_RATE % hz != 0 ||
330 (t0_count = IOC_TIMER_RATE / hz - 1) > 65535)
331 panic("ioc_initclocks: Impossible clock rate: %d Hz", hz);
332 ioc_counter_start(the_ioc, 0, t0_count);
333 evcnt_attach_dynamic(&sc->sc_clkev, EVCNT_TYPE_INTR, NULL,
334 device_xname(sc->sc_dev), "clock");
335 sc->sc_clkirq = irq_establish(IOC_IRQ_TM0, IPL_CLOCK, ioc_irq_clock,
336 NULL, &sc->sc_clkev);
337 sc->sc_tc.tc_get_timecount = ioc_get_timecount;
338 sc->sc_tc.tc_counter_mask = ~(u_int)0;
339 sc->sc_tc.tc_frequency = IOC_TIMER_RATE;
340 sc->sc_tc.tc_name = device_xname(sc->sc_dev);
341 sc->sc_tc.tc_quality = 100;
342 sc->sc_tc.tc_priv = sc;
343 tc_init(&sc->sc_tc);
344 aprint_verbose_dev(sc->sc_dev, "%d Hz clock interrupting at %s\n",
345 hz, irq_string(sc->sc_clkirq));
347 if (stathz) {
348 profhz = stathz; /* Makes life simpler */
350 if (stathz == 0 || IOC_TIMER_RATE % stathz != 0 ||
351 (statint = IOC_TIMER_RATE / stathz - 1) > 65535)
352 panic("Impossible statclock rate: %d Hz", stathz);
354 minint = statint / 2 + 100;
355 while (statvar > minint)
356 statvar >>= 1;
357 statmin = statint - (statvar >> 1);
359 ioc_counter_start(the_ioc, 1, statint);
361 evcnt_attach_dynamic(&sc->sc_sclkev, EVCNT_TYPE_INTR, NULL,
362 device_xname(sc->sc_dev), "statclock");
363 sc->sc_sclkirq = irq_establish(IOC_IRQ_TM1, IPL_HIGH,
364 ioc_irq_statclock, NULL, &sc->sc_sclkev);
365 aprint_verbose_dev(sc->sc_dev,
366 "%d Hz statclock interrupting at %s\n",
367 stathz, irq_string(sc->sc_sclkirq));
371 static int
372 ioc_irq_clock(void *cookie)
374 struct ioc_softc *sc = device_private(the_ioc);
376 sc->sc_tcbase += t0_count + 1;
377 hardclock(cookie);
378 return IRQ_HANDLED;
381 static int
382 ioc_irq_statclock(void *cookie)
384 struct ioc_softc *sc = device_private(the_ioc);
385 bus_space_tag_t bst = sc->sc_bst;
386 bus_space_handle_t bsh = sc->sc_bsh;
387 int r, newint;
389 statclock(cookie);
391 /* Generate a new randomly-distributed clock period. */
392 do {
393 r = random() & (statvar - 1);
394 } while (r == 0);
395 newint = statmin + r;
398 * Load the next clock period into the latch, but don't do anything
399 * with it. It'll be used for the _next_ statclock reload.
401 bus_space_write_1(bst, bsh, IOC_T1LOW, newint & 0xff);
402 bus_space_write_1(bst, bsh, IOC_T1HIGH, newint >> 8 & 0xff);
403 return IRQ_HANDLED;
406 void
407 setstatclockrate(int hzrate)
410 /* Nothing to do here -- we've forced stathz == profhz above. */
411 KASSERT(hzrate == stathz);
415 * IOC timecounter
417 * We construct a timecounter from timer 0, which is also running the
418 * hardclock interrupt. Since the timer 0 resets on every hardclock
419 * interrupt, we keep track of the high-order bits of the counter in
420 * software, incrementing it on every hardclock. If hardclock
421 * interrupts are disabled, there's a period where the timer has reset
422 * but the interrupt handler hasn't incremented the hight-order bits.
423 * We detect this by checking whether there's a hardclock interrupt
424 * pending. We take a bit of extra care to ensure that we aren't
425 * confused by the interrupt happening between our latching the
426 * timer's count and reading the interrupt flag.
428 static u_int
429 ioc_get_timecount(struct timecounter *tc)
431 struct ioc_softc *sc = tc->tc_priv;
432 bus_space_tag_t bst = sc->sc_bst;
433 bus_space_handle_t bsh = sc->sc_bsh;
434 u_int t0, count;
435 int s, intpending;
437 s = splclock();
438 bus_space_write_1(bst, bsh, IOC_T0LATCH, 0);
439 if (__predict_false((intpending = ioc_irq_status(IOC_IRQ_TM0))))
440 bus_space_write_1(bst, bsh, IOC_T0LATCH, 0);
441 t0 = bus_space_read_1(bst, bsh, IOC_T0LOW);
442 t0 += bus_space_read_1(bst, bsh, IOC_T0HIGH) << 8;
443 count = sc->sc_tcbase - t0;
444 if (intpending)
445 count += t0_count + 1;
446 splx(s);
447 return count;
450 void
451 delay(u_int usecs)
454 if (usecs <= 10 || cold)
455 cpu_delayloop(usecs * cpu_delay_factor);
456 else {
457 struct timeval start, gap, now, end;
459 microtime(&start);
460 gap.tv_sec = usecs / 1000000;
461 gap.tv_usec = usecs % 1000000;
462 timeradd(&start, &gap, &end);
463 do {
464 microtime(&now);
465 } while (timercmp(&now, &end, <));