/*	$NetBSD: ixp425_qmgr.c,v 1.5 2009/03/05 01:38:12 msaitoh Exp $	*/

/*-
 * Copyright (c) 2006 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

/*-
 * Copyright (c) 2001-2005, Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
/*__FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/ixp425_qmgr.c,v 1.1 2006/11/19 23:55:23 sam Exp $");*/
__KERNEL_RCSID(0, "$NetBSD: ixp425_qmgr.c,v 1.5 2009/03/05 01:38:12 msaitoh Exp $");
/*
 * Intel XScale Queue Manager support.
 *
 * Each IXP4XXX device has a hardware block that implements a priority
 * queue manager that is shared between the XScale cpu and the backend
 * devices (such as the NPE).  Queues are accessed by reading/writing
 * special memory locations.  The queue contents are mapped into a shared
 * SRAM region with entries managed in a circular buffer.  The XScale
 * processor can receive interrupts based on queue contents (a condition
 * code determines when interrupts should be delivered).
 *
 * The code here basically replaces the qmgr class in the Intel Access
 * Library (IAL).
 */
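
/*
 * Illustrative usage sketch (not part of the driver).  A client such as an
 * NPE network driver would configure a queue and then push/pull 32-bit
 * entries through it roughly as below; the queue id (6), watermarks (2/2)
 * and callback used here are made-up example values.
 *
 *	static void
 *	my_qdone(int qid, void *arg)
 *	{
 *		uint32_t entry;
 *
 *		while (ixpqmgr_qread(qid, &entry) == 0 && entry != 0)
 *			;	// process the buffer handle in 'entry'
 *	}
 *
 *	// 128-entry queue, interrupt when the queue becomes not-empty
 *	ixpqmgr_qconfig(6, 128, 2, 2, IX_QMGR_Q_SOURCE_ID_NOT_E,
 *	    my_qdone, softc);
 *	ixpqmgr_qwrite(6, buffer_paddr);
 */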
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/resource.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>

#include <arm/xscale/ixp425_qmgr.h>
/*
 * State per AQM hw queue.
 * This structure holds q configuration and dispatch state.
 */
struct qmgrInfo {
	int		qSizeInWords;		/* queue size in words */

	uint32_t	qOflowStatBitMask;	/* overflow status mask */
	int		qWriteCount;		/* queue write count */

	bus_size_t	qAccRegAddr;		/* access register */
	bus_size_t	qUOStatRegAddr;		/* status register */
	bus_size_t	qConfigRegAddr;		/* config register */
	int		qSizeInEntries;		/* queue size in entries */

	uint32_t	qUflowStatBitMask;	/* underflow status mask */
	int		qReadCount;		/* queue read count */

	uint32_t	qStatRegAddr;
	uint32_t	qStatBitsOffset;
	uint32_t	qStat0BitMask;
	uint32_t	qStat1BitMask;

	uint32_t	intRegCheckMask;	/* interrupt reg check mask */
	void		(*cb)(int, void *);	/* callback function */
	void		*cbarg;			/* callback argument */
	int		priority;		/* dispatch priority */

	/* NB: needed only for A0 parts */
	u_int		statusWordOffset;	/* status word offset */
	uint32_t	statusMask;		/* status mask */
	uint32_t	statusCheckValue;	/* status check value */
};
struct ixpqmgr_softc {
#ifndef __NetBSD__
	device_t		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;

	struct resource		*sc_irq;	/* IRQ resource */
	int			sc_rid;		/* resource id for irq */
	void			*sc_ih;		/* interrupt handler */
#else
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	void			*sc_ih[2];	/* interrupt handler */
#endif

	struct qmgrInfo		qinfo[IX_QMGR_MAX_NUM_QUEUES];
	/*
	 * This array contains a list of queue identifiers ordered by
	 * priority.  The table is split logically between queue
	 * identifiers 0-31 and 32-63.  To optimize lookups bit masks
	 * are kept for the first-32 and last-32 q's.  When the
	 * table needs to be rebuilt mark rebuildTable and it'll
	 * happen after the next interrupt.
	 */
	int			priorityTable[IX_QMGR_MAX_NUM_QUEUES];
	uint32_t		lowPriorityTableFirstHalfMask;
	uint32_t		uppPriorityTableFirstHalfMask;
	int			rebuildTable;	/* rebuild priorityTable */

	uint32_t		aqmFreeSramAddress;	/* SRAM free space */
};
static int qmgr_debug = 0;
#define	DPRINTF(dev, fmt, ...) do {					\
	if (qmgr_debug) printf(fmt, __VA_ARGS__);			\
} while (0)
#define	DPRINTFn(n, dev, fmt, ...) do {					\
	if (qmgr_debug >= n) printf(fmt, __VA_ARGS__);			\
} while (0)

static struct ixpqmgr_softc *ixpqmgr_sc = NULL;
static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
static int ixpqmgr_intr(void *);

static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
static void aqm_reset(struct ixpqmgr_softc *sc);

static void
dummyCallback(int qId, void *arg)
{
}
static __inline uint32_t
aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}
#ifndef __NetBSD__
static int
ixpqmgr_probe(device_t dev)
{
	device_set_desc(dev, "IXP425 Q-Manager");
	return 0;
}

static int
ixpqmgr_attach(device_t dev)
#else
void *
ixpqmgr_init(bus_space_tag_t iot)
#endif
{
#ifndef __NetBSD__
	struct ixpqmgr_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
#else
	struct ixpqmgr_softc *sc;
#endif
	int i;

#ifndef __NetBSD__
	/* ... */
	sc->sc_iot = sa->sc_iot;
#else
	sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO);
	/* ... */
#endif
	ixpqmgr_sc = sc;

	if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
	    0, &sc->sc_ioh))
		panic("%s: Cannot map registers", __func__);

#ifndef __NetBSD__
	/* NB: we only use the lower 32 q's */
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid,
	    IXP425_INT_QUE1_32, IXP425_INT_QUE33_64, 2, RF_ACTIVE);
	if (sc->sc_irq == NULL)
		panic("Unable to allocate the qmgr irqs.\n");
	/* XXX could be a source of entropy */
	bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    ixpqmgr_intr, NULL, &sc->sc_ih);
#else
	sc->sc_ih[0] = ixp425_intr_establish(IXP425_INT_QUE1_32, IPL_NET,
	    ixpqmgr_intr, NULL);
	if (sc->sc_ih[0] == NULL) {
		/* ... */
	}
	sc->sc_ih[1] = ixp425_intr_establish(IXP425_INT_QUE33_64, IPL_NET,
	    ixpqmgr_intr, NULL);
	if (sc->sc_ih[1] == NULL) {
		ixp425_intr_disestablish(sc->sc_ih[0]);
		/* ... */
	}
#endif

	/* NB: softc is pre-zero'd */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
		struct qmgrInfo *qi = &sc->qinfo[i];

		qi->cb = dummyCallback;
		qi->priority = IX_QMGR_Q_PRIORITY_0;	/* default priority */
		/*
		 * There are two interrupt registers, 32 bits each.  One
		 * for the lower queues (0-31) and one for the upper
		 * queues (32-63).  Therefore we need to mod by 32, i.e.
		 * the min upper queue identifier.
		 */
		qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));
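
		/*
		 * For example (illustration only): queue 3 and queue 35
		 * both map to bit 3, queue 3 in interrupt register
		 * QINTREG0 and queue 35 in QINTREG1.
		 */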
		/*
		 * Register addresses and bit masks are calculated and
		 * stored here to optimize QRead, QWrite and QStatusGet
		 * functions.
		 */

		/* AQM Queue access reg addresses, per queue */
		qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
		qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
		qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);

		/* AQM Queue lower-group (0-31), only */
		if (i < IX_QMGR_MIN_QUEUPP_QID) {
			/* AQM Q underflow/overflow status reg address, per queue */
			qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
			    ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
			     sizeof(uint32_t));

			/* AQM Q underflow status bit masks for status reg per queue */
			qi->qUflowStatBitMask =
			    (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
			    ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
			     (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));

			/* AQM Q overflow status bit masks for status reg, per queue */
			qi->qOflowStatBitMask =
			    (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
			    ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
			     (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));

			/* AQM Q lower-group (0-31) status reg addresses, per queue */
			qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
			    ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
			     sizeof(uint32_t));

			/* AQM Q lower-group (0-31) status register bit offset */
			qi->qStatBitsOffset =
			    (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
			    (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
		} else { /* AQM Q upper-group (32-63), only */
			qi->qUOStatRegAddr = 0;		/* XXX */

			/* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
			qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));

			/* AQM Q upper-group (32-63) Full status register bitmasks */
			qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
		}
	}
	sc->aqmFreeSramAddress = 0x100;	/* Q buffer space starts at 0x2100 */

	ixpqmgr_rebuild(sc);		/* build initial priority table */
	aqm_reset(sc);			/* reset h/w */

#ifndef __NetBSD__
	return 0;
#else
	return (sc);
#endif
}
#ifndef __NetBSD__
static int
ixpqmgr_detach(device_t dev)
{
	struct ixpqmgr_softc *sc = device_get_softc(dev);

	aqm_reset(sc);		/* disable interrupts */
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
	return (0);
}
#endif
int
ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
    void (*cb)(int, void *), void *cbarg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
	    __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);

	/* NB: entry size is always 1 */
	qi->qSizeInWords = qEntries;

	qi->qSizeInEntries = qEntries;	/* XXX kept for code clarity */

	if (cb == NULL) {
		/* Reset to dummy callback */
		qi->cb = dummyCallback;
		qi->cbarg = NULL;
	} else {
		qi->cb = cb;
		qi->cbarg = cbarg;
	}

	/* Write the config register; NB must be AFTER qinfo setup */
	aqm_qcfg(sc, qId, ne, nf);
	/*
	 * Account for space just allocated to queue.
	 */
	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));

	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	if (cb != NULL)			/* Enable the interrupt */
		aqm_int_enable(sc, qId);

	sc->rebuildTable = true;

	return 0;
}
void
ixpqmgr_qwrite(int qId, uint32_t entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);

	/* write the entry */
	aqm_reg_write(sc, qi->qAccRegAddr, entry);

	/* NB: overflow is available for lower queues only */
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		int qSize = qi->qSizeInEntries;
		/*
		 * Increment the current number of entries in the queue
		 * and check for overflow.
		 */
		if (qi->qWriteCount++ == qSize) {	/* check for overflow */
			uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
			uint32_t qPtrs;

			/*
			 * Read the status twice because the status may
			 * not be immediately ready after the write operation.
			 */
			if ((status & qi->qOflowStatBitMask) ||
			    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
				/*
				 * The queue is full, clear the overflow status bit if set.
				 */
				aqm_reg_write(sc, qi->qUOStatRegAddr,
				    status & ~qi->qOflowStatBitMask);
				qi->qWriteCount = qSize;
				DPRINTFn(5, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, overflow status cleared\n",
				    __func__, qId, entry);
			} else {
				/*
				 * No overflow occurred: someone is draining the queue
				 * and the current counter needs to be
				 * updated from the current number of entries in the queue.
				 */

				/* calculate number of words in q */
				qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
				DPRINTFn(2, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
				    __func__, qId, entry, qPtrs);
				qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;
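				/*
				 * The expression above takes the difference of
				 * the two 7-bit pointer fields packed in the
				 * config word, modulo 128, which yields the
				 * number of words currently in the queue.
				 */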
				if (qPtrs == 0) {
					/*
					 * The queue may be full at the time of the
					 * snapshot.  Next access will check
					 * the overflow status again.
					 */
					qi->qWriteCount = qSize;
				} else {
					/* convert the number of words to a number of entries */
					qi->qWriteCount = qPtrs & (qSize - 1);
				}
			}
		}
	}
}
int
ixpqmgr_qread(int qId, uint32_t *entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;

	*entry = aqm_reg_read(sc, off);

	/*
	 * Reset the current read count: next access to the read function
	 * will force an underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}
int
ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t entry;
	bus_size_t off = qi->qAccRegAddr;

	entry = aqm_reg_read(sc, off);
	while (--n) {
		if (entry == 0) {
			/* if we read a NULL entry, stop.  We have underflowed */
			break;
		}
		*p++ = entry;	/* store */
		entry = aqm_reg_read(sc, off);
	}
	*p = entry;

	/*
	 * Reset the current read count: next access to the read function
	 * will force an underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}
uint32_t
ixpqmgr_getqstatus(int qId)
{
#define	QLOWSTATMASK \
	((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t status;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* read the status of a queue in the range 0-31 */
		status = aqm_reg_read(sc, qi->qStatRegAddr);

		/* mask out the status bits relevant only to this queue */
		status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
	} else { /* read status of a queue in the range 32-63 */
		status = 0;
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
			status |= IX_QMGR_Q_STATUS_NE_BIT_MASK;	/* nearly empty */
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
			status |= IX_QMGR_Q_STATUS_F_BIT_MASK;	/* full */
	}
	return status;
#undef QLOWSTATMASK
}
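
/*
 * Illustrative sketch (not part of the driver): callers normally rely on
 * the interrupt dispatcher, but the status word can also be polled, e.g.
 * to wait until a (hypothetical, already configured) queue 'qid' is no
 * longer full before writing another entry:
 *
 *	while (ixpqmgr_getqstatus(qid) & IX_QMGR_Q_STATUS_F_BIT_MASK)
 *		;	// queue still full
 *	ixpqmgr_qwrite(qid, entry);
 */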
uint32_t
ixpqmgr_getqconfig(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
}
void
ixpqmgr_dump(void)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	int i, a;

	/* status registers */
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x400
	    , aqm_reg_read(sc, 0x400)
	    , aqm_reg_read(sc, 0x400+4)
	    , aqm_reg_read(sc, 0x400+8)
	    , aqm_reg_read(sc, 0x400+12)
	);
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x410
	    , aqm_reg_read(sc, 0x410)
	    , aqm_reg_read(sc, 0x410+4)
	    , aqm_reg_read(sc, 0x410+8)
	    , aqm_reg_read(sc, 0x410+12)
	);
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x420
	    , aqm_reg_read(sc, 0x420)
	    , aqm_reg_read(sc, 0x420+4)
	    , aqm_reg_read(sc, 0x420+8)
	    , aqm_reg_read(sc, 0x420+12)
	);
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x430
	    , aqm_reg_read(sc, 0x430)
	    , aqm_reg_read(sc, 0x430+4)
	    , aqm_reg_read(sc, 0x430+8)
	    , aqm_reg_read(sc, 0x430+12)
	);

	/* q configuration registers */
	for (a = 0x2000; a < 0x20ff; a += 32)
		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
		    , a
		    , aqm_reg_read(sc, a)
		    , aqm_reg_read(sc, a+4)
		    , aqm_reg_read(sc, a+8)
		    , aqm_reg_read(sc, a+12)
		    , aqm_reg_read(sc, a+16)
		    , aqm_reg_read(sc, a+20)
		    , aqm_reg_read(sc, a+24)
		    , aqm_reg_read(sc, a+28)
		);

	for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
		a = 0x2000 + i;
		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
		    , a
		    , aqm_reg_read(sc, a)
		    , aqm_reg_read(sc, a+4)
		    , aqm_reg_read(sc, a+8)
		    , aqm_reg_read(sc, a+12)
		    , aqm_reg_read(sc, a+16)
		    , aqm_reg_read(sc, a+20)
		    , aqm_reg_read(sc, a+24)
		    , aqm_reg_read(sc, a+28)
		);
	}
	for (i = 0; i < 16; i++) {
		printf("Q[%2d] config 0x%08x status 0x%02x "
		       "Q[%2d] config 0x%08x status 0x%02x\n"
		    , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
		    , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
		);
	}
}
void
ixpqmgr_notify_enable(int qId, int srcSel)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
#if 0
	/* Calculate the checkMask and checkValue for this q */
	aqm_calc_statuscheck(sc, qId, srcSel);
#endif
	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	/* Enable the interrupt */
	aqm_int_enable(sc, qId);
}
void
ixpqmgr_notify_disable(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	aqm_int_disable(sc, qId);
}
/*
 * Rebuild the priority table used by the dispatcher.
 */
static void
ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
{
	int q, pri;
	int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
	struct qmgrInfo *qi;

	sc->lowPriorityTableFirstHalfMask = 0;
	sc->uppPriorityTableFirstHalfMask = 0;

	lowQuePriorityTableIndex = 0;
	uppQuePriorityTableIndex = 32;
	for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
		/* low priority q's */
		for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which matches the
				 * queues of the first half of the priority table.
				 */
				if (lowQuePriorityTableIndex < 16) {
					sc->lowPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[lowQuePriorityTableIndex++] = q;
			}
		}
		/* high priority q's */
		for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which matches the
				 * queues of the first half of the priority table.
				 */
				if (uppQuePriorityTableIndex < 48) {
					sc->uppPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[uppQuePriorityTableIndex++] = q;
			}
		}
	}
	sc->rebuildTable = false;
}
/*
 * Count the number of leading zero bits in a word,
 * and return the same value as the CLZ instruction.
 * Note this is similar to the standard ffs function but
 * it counts zeros from the MSB instead of the LSB.
 *
 * word (in)		return value (out)
 *
 * The C version of this function is used as a replacement
 * for systems not providing the equivalent of the CLZ
 * assembly language instruction.
 *
 * Note that this version is big-endian.
 */
static unsigned int
_lzcount(uint32_t word)
{
	unsigned int lzcount = 0;

	while ((word & 0x80000000) == 0) {
		word <<= 1;
		lzcount++;
	}
	return lzcount;
}
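
/*
 * For example, _lzcount(0x80000000) == 0 and _lzcount(0x00000001) == 31;
 * the dispatcher below only calls it with a non-zero word.  On XScale
 * this could be replaced by the ARM clz instruction.
 */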
static int
ixpqmgr_intr(void *arg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	struct qmgrInfo *qi;
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* No queue has interrupt register set */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);

		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue.
			 */
			qi->cb(qIndex, qi->cbarg);
		} else {
			/*
			 * The event is triggered by more than 1 queue,
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the interrupt
			 * register are cleared.  There is no need to maintain
			 * a separate value and test it at each iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
			} else {
				priorityTableIndex = 16;
			}
			/*
			 * Iterate over the priority table until all the bits
			 * of the interrupt register are cleared.
			 */
			do {
				qIndex = sc->priorityTable[priorityTableIndex++];
				if (qIndex >= IX_QMGR_MAX_NUM_QUEUES)
					break;
				qi = &sc->qinfo[qIndex];

				/* If this queue caused this interrupt to be raised */
				if (intRegVal & qi->intRegCheckMask) {
					/* Call the callback function for this queue */
					qi->cb(qIndex, qi->cbarg);
					/* Clear the interrupt register bit */
					intRegVal &= ~qi->intRegCheckMask;
				}
			} while (intRegVal &&
			    priorityTableIndex < IX_QMGR_MAX_NUM_QUEUES);
		}
	}

	/* Rebuild the priority table if needed */
	if (sc->rebuildTable)
		ixpqmgr_rebuild(sc);

	return (1);
}
#if 0
/*
 * Generate the parameters used to check if a Q's status matches
 * the specified source select.  We calculate which status word
 * to check (statusWordOffset), the value to check the status
 * against (statusCheckValue) and the mask (statusMask) to mask
 * out all but the bits to check in the status word.
 */
static void
aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
{
	struct qmgrInfo *qi = &qinfo[qId];
	uint32_t shiftVal;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		switch (srcSel) {
		case IX_QMGR_Q_SOURCE_ID_E:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NE:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NF:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_F:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_E:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NE:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NF:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_F:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		default:
			/* Should never hit */
			break;
		}

		/* One nibble of status per queue so need to shift the
		 * check value and mask out to the correct position.
		 */
		shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
		    IX_QMGR_QUELOWSTAT_BITS_PER_Q;

		/* Calculate which status word to check from the qId,
		 * 8 Qs status per word
		 */
		qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;

		qi->statusCheckValue <<= shiftVal;
		qi->statusMask <<= shiftVal;
	} else {
		/* One status word */
		qi->statusWordOffset = 0;
		/* Single bits per queue and int source bit hardwired NE,
		 * Qs start at 32.
		 */
		qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
		qi->statusCheckValue = qi->statusMask;
	}
}
#endif
static void
aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
static void
aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}
static __inline unsigned
log2(unsigned n)
{
	unsigned count;
	/*
	 * N.B. this function will return 0 if supplied 0.
	 */
	for (count = 0; n/2; count++)
		n /= 2;
	return count;
}
static __inline unsigned
toAqmEntrySize(int entrySize)
{
	/* entrySize 1("00"),2("01"),4("10") */
	return log2(entrySize);
}

static __inline unsigned
toAqmBufferSize(unsigned bufferSizeInWords)
{
	/* bufferSize 16("00"),32("01"),64("10"),128("11") */
	return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
}

static __inline unsigned
toAqmWatermark(int watermark)
{
	/*
	 * Watermarks 0("000"),1("001"),2("010"),4("011"),
	 * 8("100"),16("101"),32("110"),64("111")
	 */
	return log2(2 * watermark);
}
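
/*
 * Worked example (illustration only): a 64-entry queue of 1-word entries
 * with ne = nf = 2 encodes as toAqmEntrySize(1) = 0 ("00"),
 * toAqmBufferSize(64) = 2 ("10", per the table above) and
 * toAqmWatermark(2) = log2(4) = 2 ("010"); aqm_qcfg() below packs these
 * fields, together with the SRAM base address, into the QUECONFIG word.
 */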
static void
aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
{
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t qCfg;
	uint32_t baseAddress;

	/* Build config register */
	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
	     | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);

	/* baseAddress, calculated relative to start address */
	baseAddress = sc->aqmFreeSramAddress;

	/* base address must be word-aligned */
	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0);

	/* Now convert to a 16 word pointer as required by QUECONFIG register */
	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;

	/* set watermarks */
	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
	     |  (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
	    __func__, qId, ne, nf,
	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
	    qCfg, (u_int)IX_QMGR_Q_CONFIG_ADDR_GET(qId));

	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
}
static void
aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
{
	bus_size_t off;
	uint32_t v;

	/*
	 * Calculate the register offset; multiple queues split across registers
	 */
	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));

	v = aqm_reg_read(sc, off);
	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
		/* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */
		v |= 0x7;
	} else {
		const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
		uint32_t mask;
		int qshift;

		qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
		mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */

		/* merge sourceId */
		v = (v &~ mask) | ((sourceId << qshift) & mask);
	}

	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
	aqm_reg_write(sc, off, v);
}
/*
 * Reset AQM registers to default values.
 */
static void
aqm_reset(struct ixpqmgr_softc *sc)
{
	int i;

	/* Reset queues 0..31 status registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);

	/* Reset underflow/overflow status registers 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);

	/* Reset queues 32..63 nearly empty status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
	    IX_QMGR_QUEUPPSTAT0_RESET_VALUE);

	/* Reset queues 32..63 full status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
	    IX_QMGR_QUEUPPSTAT1_RESET_VALUE);

	/* Reset int0 status flag source select registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);

	/* Reset queue interrupt enable register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);

	/* Reset queue interrupt register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);

	/* Reset queue configuration words 0..63 */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
		aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
		    IX_QMGR_QUECONFIG_RESET_VALUE);

	/* XXX zero SRAM to simplify debugging */
	for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
	     i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
		aqm_reg_write(sc, i, 0);
}
#ifndef __NetBSD__
static device_method_t ixpqmgr_methods[] = {
	DEVMETHOD(device_probe,		ixpqmgr_probe),
	DEVMETHOD(device_attach,	ixpqmgr_attach),
	DEVMETHOD(device_detach,	ixpqmgr_detach),

	{ 0, 0 }
};

static driver_t ixpqmgr_driver = {
	"ixpqmgr",
	ixpqmgr_methods,
	sizeof(struct ixpqmgr_softc),
};
static devclass_t ixpqmgr_devclass;

DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);
#endif