sys/arch/sun3/dev/si_sebuf.c
/*	$NetBSD: si_sebuf.c,v 1.27 2008/04/28 20:23:38 martin Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Sun3/E SCSI driver (machine-dependent portion).
 * The machine-independent parts are in ncr5380sbc.c
 *
 * XXX - Mostly from the si driver.  Merge?
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: si_sebuf.c,v 1.27 2008/04/28 20:23:38 martin Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/proc.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#include <machine/autoconf.h>

/* #define DEBUG XXX */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include "sereg.h"
#include "sevar.h"
/*
 * Transfers smaller than this are done using PIO
 * (on the assumption they're not worth DMA overhead)
 */
#define	MIN_DMA_LEN 128
/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.  (paranoid?)
 */
#define	MAX_DMA_LEN 0xE000
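
/*
 * Illustrative sketch only (not part of the original driver): if a
 * request larger than MAX_DMA_LEN ever had to be handled here rather
 * than being bounded by se_minphys() further below, it could be carved
 * into chunks that each fit the 16-bit count register.  The "total"
 * and "chunk" names are hypothetical.
 */
#if 0
	while (total > 0) {
		chunk = (total > MAX_DMA_LEN) ? MAX_DMA_LEN : total;
		/* program dma_addr/dma_cntr for this chunk, then run it */
		total -= chunk;
	}
#endif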
/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct se_dma_handle {
	int		dh_flags;
#define	SIDH_BUSY	1		/* This DH is in use */
#define	SIDH_OUT	2		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Length of KVA mapping. */
	long		dh_dma;		/* Offset in DMA buffer. */
};
/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct se_softc {
	struct ncr5380_softc	ncr_sc;
	volatile struct se_regs	*sc_regs;
	int		sc_adapter_type;
	int		sc_adapter_iv;		/* int. vec */
	int		sc_options;		/* options for this instance */
	int		sc_reqlen;		/* requested transfer length */
	struct se_dma_handle	*sc_dma;
	/* DMA command block for the OBIO controller. */
	void		*sc_dmacmd;
};
/* Options for disconnect/reselect, DMA, and interrupts. */
#define	SE_NO_DISCONNECT	0xff
#define	SE_NO_PARITY_CHK	0xff00
#define	SE_FORCE_POLLING	0x10000
#define	SE_DISABLE_DMA		0x20000
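
/*
 * Illustrative decomposition (not compiled): the options word packs a
 * per-target no-disconnect mask in bits 0-7, a per-target parity-disable
 * mask in bits 8-15, and per-controller flags above that, exactly as
 * se_attach() unpacks them below.  The variable names here are
 * hypothetical.
 */
#if 0
	no_disconnect  =  options & SE_NO_DISCONNECT;		/* bits 0-7 */
	parity_disable = (options & SE_NO_PARITY_CHK) >> 8;	/* bits 8-15 */
	force_polling  =  options & SE_FORCE_POLLING;		/* bit 16 */
	disable_dma    =  options & SE_DISABLE_DMA;		/* bit 17 */
#endif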
void se_dma_alloc(struct ncr5380_softc *);
void se_dma_free(struct ncr5380_softc *);
void se_dma_poll(struct ncr5380_softc *);

void se_dma_setup(struct ncr5380_softc *);
void se_dma_start(struct ncr5380_softc *);
void se_dma_eop(struct ncr5380_softc *);
void se_dma_stop(struct ncr5380_softc *);

void se_intr_on (struct ncr5380_softc *);
void se_intr_off(struct ncr5380_softc *);

static int  se_intr(void *);
static void se_reset(struct ncr5380_softc *);
/*
 * New-style autoconfig attachment
 */

static int	se_match(device_t, cfdata_t, void *);
static void	se_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(si_sebuf, sizeof(struct se_softc),
    se_match, se_attach, NULL, NULL);

static void	se_minphys(struct buf *);
/* Options for disconnect/reselect, DMA, and interrupts. */
int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;

/* How long to wait for DMA before declaring an error. */
int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */
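
/*
 * For reference (assuming the "sec. X 100" note above means 100 ticks
 * per second): 500 ticks works out to roughly a 5 second wait before an
 * interrupt-driven DMA transfer would be declared overdue.
 */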
int se_debug = 0;
static int
se_match(device_t parent, cfdata_t cf, void *args)
{
	struct sebuf_attach_args *aa = args;

	/* Match by name. */
	if (strcmp(aa->name, "se"))
		return 0;

	/* Anything else to check? */

	return 1;
}
static void
se_attach(device_t parent, device_t self, void *args)
{
	struct se_softc *sc = device_private(self);
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct cfdata *cf = device_cfdata(self);
	struct sebuf_attach_args *aa = args;
	volatile struct se_regs *regs;
	int i;

	ncr_sc->sc_dev = self;

	/* Get options from config flags if specified. */
	if (cf->cf_flags)
		sc->sc_options = cf->cf_flags;
	else
		sc->sc_options = se_options;

	aprint_normal(": options=0x%x\n", sc->sc_options);

	sc->sc_adapter_type = aa->ca.ca_bustype;
	sc->sc_adapter_iv = aa->ca.ca_intvec;
	sc->sc_regs = regs = aa->regs;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in  = ncr5380_pio_in;

#if 0	/* XXX - not yet... */
	ncr_sc->sc_dma_alloc = se_dma_alloc;
	ncr_sc->sc_dma_free  = se_dma_free;
	ncr_sc->sc_dma_setup = se_dma_setup;
	ncr_sc->sc_dma_start = se_dma_start;
	ncr_sc->sc_dma_poll  = se_dma_poll;
	ncr_sc->sc_dma_eop   = se_dma_eop;
	ncr_sc->sc_dma_stop  = se_dma_stop;
	ncr_sc->sc_intr_on   = se_intr_on;
	ncr_sc->sc_intr_off  = se_intr_off;
#endif	/* XXX */
	/* Attach interrupt handler. */
	isr_add_vectored(se_intr, (void *)sc,
	    aa->ca.ca_intpri, aa->ca.ca_intvec);

	/* Reset the hardware. */
	se_reset(ncr_sc);

	/* Do the common attach stuff. */

	/*
	 * Support the "options" (config file flags).
	 * Disconnect/reselect is a per-target mask.
	 * Interrupts and DMA are per-controller.
	 */
	ncr_sc->sc_no_disconnect =
	    (sc->sc_options & SE_NO_DISCONNECT);
	ncr_sc->sc_parity_disable =
	    (sc->sc_options & SE_NO_PARITY_CHK) >> 8;
	if (sc->sc_options & SE_FORCE_POLLING)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if (sc->sc_options & SE_DISABLE_DMA) {
		/* Override this function pointer. */
		ncr_sc->sc_dma_alloc = NULL;
	}
#endif
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Initialize fields used by the MI code
	 */
	ncr_sc->sci_r0 = &regs->ncrregs[0];
	ncr_sc->sci_r1 = &regs->ncrregs[1];
	ncr_sc->sci_r2 = &regs->ncrregs[2];
	ncr_sc->sci_r3 = &regs->ncrregs[3];
	ncr_sc->sci_r4 = &regs->ncrregs[4];
	ncr_sc->sci_r5 = &regs->ncrregs[5];
	ncr_sc->sci_r6 = &regs->ncrregs[6];
	ncr_sc->sci_r7 = &regs->ncrregs[7];

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
	sc->sc_dma = malloc(i, M_DEVBUF, M_WAITOK);
	if (sc->sc_dma == NULL)
		panic("se: dma_malloc failed");
	for (i = 0; i < SCI_OPENINGS; i++)
		sc->sc_dma[i].dh_flags = 0;

	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = se_minphys;

	/*
	 * Initialize se board itself.
	 */
	ncr5380_attach(ncr_sc);
}
static void
se_reset(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

#ifdef	DEBUG
	if (se_debug) {
		printf("%s\n", __func__);
	}
#endif

	/* The reset bits in the CSR are active low. */
	se->se_csr = 0;
	delay(10);
	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
	delay(10);

	/* Make sure the DMA engine is stopped. */
	se->dma_addr = 0;
	se->dma_cntr = 0;
	se->se_ivec = sc->sc_adapter_iv;
}
/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
se_intr_on(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	/* receive mode should be safer */
	se->se_csr &= ~SE_CSR_SEND;

	/* Clear the count so nothing happens. */
	se->dma_cntr = 0;

	/* Clear the start address too. (paranoid?) */
	se->dma_addr = 0;

	/* Finally, enable the DMA engine. */
	se->se_csr |= SE_CSR_INTR_EN;
}
/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
se_intr_off(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	se->se_csr &= ~SE_CSR_INTR_EN;
}
/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to setup the DMA engine before the bus enters a DATA phase.
 *
 * On the VME version, setup the start address, but clear the
 * count (to make sure it stays idle) and set that later.
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 * XXX: Need to copy data into the DMA buffer...
 */
void
se_dma_setup(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	long data_pa;
	int xlen;

	/*
	 * Get the DMA mapping for this segment.
	 * XXX - Should separate allocation and mapin.
	 */
	data_pa = 0;	/* XXX se_dma_kvtopa(dh->dh_dma); */
	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
	if (data_pa & 1)
		panic("%s: bad pa=0x%lx", __func__, data_pa);
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;		/* XXX: necessary? */
	sc->sc_reqlen = xlen;	/* XXX: or less? */

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("%s: dh=%p, pa=0x%lx, xlen=0x%x\n",
		    __func__, dh, data_pa, xlen);
	}
#endif

	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		se->se_csr |= SE_CSR_SEND;
	} else {
		se->se_csr &= ~SE_CSR_SEND;
	}

	/* Load the start address. */
	se->dma_addr = (ushort)(data_pa & 0xFFFF);

	/*
	 * Keep the count zero or it may start early!
	 */
	se->dma_cntr = 0;
}
void
se_dma_start(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int s, xlen;

	xlen = sc->sc_reqlen;

	/* This MAY be time critical (not sure). */
	s = splhigh();

	se->dma_cntr = (ushort)(xlen & 0xFFFF);

	/*
	 * Acknowledge the phase change. (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_dma_send = 0;	/* start it */
	} else {
		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = 0;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_irecv = 0;	/* start it */
	}

	/* Let'er rip! */
	se->se_csr |= SE_CSR_INTR_EN;

	splx(s);
	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("%s: started, flags=0x%x\n",
		    __func__, ncr_sc->sc_state);
	}
#endif
}
void
se_dma_eop(struct ncr5380_softc *ncr_sc)
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}
void
se_dma_stop(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int resid, ntrans;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("%s: DMA not running\n", __func__);
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */

	/* Set an impossible phase to prevent data movement? */
	*ncr_sc->sci_tcmd = PHASE_INVALID;

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/* XXX: Wait for DMA to actually finish? */

	/*
	 * Now try to figure out how much actually transferred
	 */
	resid = se->dma_cntr & 0xFFFF;
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_reqlen))
			resid++;
	ntrans = sc->sc_reqlen - resid;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("%s: resid=0x%x ntrans=0x%x\n",
		    __func__, resid, ntrans);
	}
#endif

	if (ntrans < MIN_DMA_LEN) {
		printf("se: fifo count: 0x%x\n", resid);
		ncr_sc->sc_state |= NCR_ABORTING;
		goto out;
	}
	if (ntrans > ncr_sc->sc_datalen)
		panic("%s: excess transfer", __func__);

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

out:
	se->dma_addr = 0;
	se->dma_cntr = 0;

	/* Put SBIC back in PIO mode. */
	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	*ncr_sc->sci_icmd = 0;
}
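
/*
 * Worked example of the residual math above, with hypothetical numbers:
 * if sc_reqlen was 0x2000 and dma_cntr reads 0x0010 when the engine is
 * halted, resid is 0x0010 (0x0011 for a write, because of the one-byte
 * FIFO fudge), so ntrans = 0x2000 - resid is what advances sc_dataptr
 * and shrinks sc_datalen.
 */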
/*****************************************************************/

static void
se_minphys(struct buf *bp)
{

	if (bp->b_bcount > MAX_DMA_LEN)
		bp->b_bcount = MAX_DMA_LEN;

	minphys(bp);
}
static int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	volatile struct se_regs *se = sc->sc_regs;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = se->se_csr;
	NCR_TRACE("se_intr: csr=0x%x\n", csr);

	if (csr & SE_CSR_SBC_IP) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef	DEBUG
		if (!claimed) {
			printf("%s: spurious from SBC\n", __func__);
		}
#endif
		/* Yes, we DID cause this interrupt. */
		claimed = 1;
	}

	return claimed;
}
/*****************************************************************
 * Common functions for DMA
 ****************************************************************/
/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun3/E, this means we have to
 * allocate space in the DMA buffer for this transfer.
 */
void
se_dma_alloc(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct se_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef	DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("%s: already have DMA handle", __func__);
#endif

	addr = (u_long)ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("%s: misaligned.\n", __func__);
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("%s: xlen=0x%x", __func__, xlen);

	/*
	 * Never attempt single transfers of more than 63k, because
	 * our count register may be only 16 bits (an OBIO adapter).
	 * This should never happen since already bounded by minphys().
	 * XXX - Should just segment these...
	 */
	if (xlen > MAX_DMA_LEN) {
		printf("%s: excessive xlen=0x%x\n", __func__, xlen);
		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
	}

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("se: no free DMA handles.");
found:

	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;

	/* Copy the "write" flag for convenience. */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		dh->dh_flags |= SIDH_OUT;

	dh->dh_addr = (uint8_t *)addr;
	dh->dh_maplen = xlen;
	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
	/* XXX: dh->dh_dma = alloc(xlen) */
	if (!dh->dh_dma) {
		/* Can't remap segment */
		printf("%s: can't remap %p/0x%x\n",
		    __func__, dh->dh_addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}

	/* success */
	sr->sr_dma_hand = dh;
}
void
se_dma_free(struct ncr5380_softc *ncr_sc)
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;

#ifdef	DIAGNOSTIC
	if (dh == NULL)
		panic("%s: no DMA handle", __func__);
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("%s: free while in progress", __func__);

	if (dh->dh_flags & SIDH_BUSY) {
		/* XXX: Should separate allocation and mapping. */
		/* XXX: Give back the DMA space. */
		/* XXX: free((void *)dh->dh_dma, dh->dh_maplen); */
		dh->dh_dma = 0;
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}
#define	CSR_MASK	SE_CSR_SBC_IP
#define	POLL_TIMO	50000	/* X100 = 5 sec. */
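
/*
 * For reference: the polling loop in se_dma_poll() below checks the CSR
 * up to POLL_TIMO times with a delay(100) (microseconds) between checks,
 * so the effective timeout is about 50000 * 100us = 5 seconds, matching
 * the "X100 = 5 sec." note above.
 */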
/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
se_dma_poll(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	volatile struct se_regs *se = sc->sc_regs;
	int tmo;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	/*
	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
	 * XXX: (on obio) or even worse (on vme) a 10mS. delay!
	 * XXX: I really doubt that is necessary...
	 */

	/* Wait for any "DMA complete" or error bits. */
	tmo = POLL_TIMO;
	for (;;) {
		if (se->se_csr & CSR_MASK)
			break;
		if (--tmo <= 0) {
			printf("se: DMA timeout (while polling)\n");
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}
	NCR_TRACE("se_dma_poll: waited %d\n",
	    POLL_TIMO - tmo);

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("%s: done, csr=0x%x\n", __func__, se->se_csr);
	}
#endif
}