No empty .Rs/.Re
[netbsd-mini2440.git] / sys / dev / ic / mvsata.c
blobaa83b70bbe0ca0d00be79c1cdf0b87bc807a3df0
1 /* $NetBSD: mvsata.c,v 1.2 2009/08/03 20:06:36 snj Exp $ */
2 /*
3 * Copyright (c) 2008 KIYOHARA Takashi
4 * All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: mvsata.c,v 1.2 2009/08/03 20:06:36 snj Exp $");
31 #include "opt_mvsata.h"
33 /* ATAPI implementation not finished. Also don't work shadow registers? */
34 //#include "atapibus.h"
36 #include <sys/param.h>
37 #if NATAPIBUS > 0
38 #include <sys/buf.h>
39 #endif
40 #include <sys/bus.h>
41 #include <sys/cpu.h>
42 #include <sys/device.h>
43 #include <sys/disklabel.h>
44 #include <sys/errno.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/proc.h>
49 #include <machine/vmparam.h>
51 #include <dev/ata/atareg.h>
52 #include <dev/ata/atavar.h>
53 #include <dev/ic/wdcvar.h>
54 #include <dev/ata/satareg.h>
55 #include <dev/ata/satavar.h>
57 #if NATAPIBUS > 0
58 #include <dev/scsipi/scsi_all.h> /* for SCSI status */
59 #endif
61 #include <dev/pci/pcidevs.h>
63 #include <dev/ic/mvsatareg.h>
64 #include <dev/ic/mvsatavar.h>
/* Shorthand for the autoconf device behind a softc / port. */
#define MVSATA_DEV(sc)		((sc)->sc_wdcdev.sc_atac.atac_dev)
#define MVSATA_DEV2(mvport)	((mvport)->port_ata_channel.ch_atac->atac_dev)

/* SATA host-controller (per-hc) register access. */
#define MVSATA_HC_READ_4(hc, reg) \
	bus_space_read_4((hc)->hc_iot, (hc)->hc_ioh, (reg))
#define MVSATA_HC_WRITE_4(hc, reg, val) \
	bus_space_write_4((hc)->hc_iot, (hc)->hc_ioh, (reg), (val))

/* Per-port EDMA register access. */
#define MVSATA_EDMA_READ_4(mvport, reg) \
	bus_space_read_4((mvport)->port_iot, (mvport)->port_ioh, (reg))
#define MVSATA_EDMA_WRITE_4(mvport, reg, val) \
	bus_space_write_4((mvport)->port_iot, (mvport)->port_ioh, (reg), (val))

/* Per-port shadow (WDC-compatible) register access. */
#define MVSATA_WDC_READ_2(mvport, reg) \
	bus_space_read_2((mvport)->port_iot, (mvport)->port_ioh, (reg))
#define MVSATA_WDC_READ_1(mvport, reg) \
	bus_space_read_1((mvport)->port_iot, (mvport)->port_ioh, (reg))
#define MVSATA_WDC_WRITE_2(mvport, reg, val) \
	bus_space_write_2((mvport)->port_iot, (mvport)->port_ioh, (reg), (val))
#define MVSATA_WDC_WRITE_1(mvport, reg, val) \
	bus_space_write_1((mvport)->port_iot, (mvport)->port_ioh, (reg), (val))

#ifdef MVSATA_DEBUG
#define DPRINTF(x)	if (mvsata_debug) printf x
#define DPRINTFN(n,x)	if (mvsata_debug >= (n)) printf x
int	mvsata_debug = 3;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define ATA_DELAY		10000	/* 10s for a drive I/O */
#define ATAPI_DELAY		10	/* 10 ms, this is used only before
					   sending a cmd */
#define ATAPI_MODE_DELAY	1000	/* 1s, timeout for SET_FEATURE cmds */

/* Worst-case ePRD table size for one request (one entry per page). */
#define MVSATA_EPRD_MAX_SIZE	(sizeof(struct eprd) * (MAXPHYS / PAGE_SIZE))
104 #ifndef MVSATA_WITHOUTDMA
105 static int mvsata_bio(struct ata_drive_datas *, struct ata_bio *);
106 static void mvsata_reset_drive(struct ata_drive_datas *, int);
107 static void mvsata_reset_channel(struct ata_channel *, int);
108 static int mvsata_exec_command(struct ata_drive_datas *, struct ata_command *);
109 static int mvsata_addref(struct ata_drive_datas *);
110 static void mvsata_delref(struct ata_drive_datas *);
111 static void mvsata_killpending(struct ata_drive_datas *);
113 #if NATAPIBUS > 0
114 static void mvsata_atapibus_attach(struct atabus_softc *);
115 static void mvsata_atapi_scsipi_request(struct scsipi_channel *,
116 scsipi_adapter_req_t, void *);
117 static void mvsata_atapi_minphys(struct buf *);
118 static void mvsata_atapi_probe_device(struct atapibus_softc *, int);
119 static void mvsata_atapi_kill_pending(struct scsipi_periph *);
120 #endif
121 #endif
123 static void mvsata_setup_channel(struct ata_channel *);
125 #ifndef MVSATA_WITHOUTDMA
126 static void mvsata_bio_start(struct ata_channel *, struct ata_xfer *);
127 static int mvsata_bio_intr(struct ata_channel *, struct ata_xfer *, int);
128 static void mvsata_bio_kill_xfer(struct ata_channel *, struct ata_xfer *, int);
129 static void mvsata_bio_done(struct ata_channel *, struct ata_xfer *);
130 static int mvsata_bio_ready(struct mvsata_port *, struct ata_bio *, int,
131 int);
132 static void mvsata_wdc_cmd_start(struct ata_channel *, struct ata_xfer *);
133 static int mvsata_wdc_cmd_intr(struct ata_channel *, struct ata_xfer *, int);
134 static void mvsata_wdc_cmd_kill_xfer(struct ata_channel *, struct ata_xfer *,
135 int);
136 static void mvsata_wdc_cmd_done(struct ata_channel *, struct ata_xfer *);
137 static void mvsata_wdc_cmd_done_end(struct ata_channel *, struct ata_xfer *);
138 #if NATAPIBUS > 0
139 static void mvsata_atapi_start(struct ata_channel *, struct ata_xfer *);
140 static int mvsata_atapi_intr(struct ata_channel *, struct ata_xfer *, int);
141 static void mvsata_atapi_kill_xfer(struct ata_channel *, struct ata_xfer *,
142 int);
143 static void mvsata_atapi_reset(struct ata_channel *, struct ata_xfer *);
144 static void mvsata_atapi_phase_complete(struct ata_xfer *);
145 static void mvsata_atapi_done(struct ata_channel *, struct ata_xfer *);
146 static void mvsata_atapi_polldsc(void *);
147 #endif
149 static int mvsata_edma_inqueue(struct mvsata_port *, struct ata_bio *, void *);
150 static int mvsata_edma_handle(struct mvsata_port *, struct ata_xfer *);
151 static int mvsata_edma_wait(struct mvsata_port *, struct ata_xfer *, int);
152 static void mvsata_edma_timeout(void *);
153 static void mvsata_edma_rqq_remove(struct mvsata_port *, struct ata_xfer *);
154 #if NATAPIBUS > 0
155 static int mvsata_bdma_init(struct mvsata_port *, struct scsipi_xfer *, void *);
156 static void mvsata_bdma_start(struct mvsata_port *);
157 #endif
158 #endif
160 static int mvsata_port_init(struct mvsata_hc *, int);
161 static int mvsata_wdc_reg_init(struct mvsata_port *, struct wdc_regs *);
162 #ifndef MVSATA_WITHOUTDMA
163 static inline void mvsata_quetag_init(struct mvsata_port *);
164 static inline int mvsata_quetag_get(struct mvsata_port *);
165 static inline void mvsata_quetag_put(struct mvsata_port *, int);
166 static void *mvsata_edma_resource_prepare(struct mvsata_port *, bus_dma_tag_t,
167 bus_dmamap_t *, size_t, int);
168 static void mvsata_edma_resource_purge(struct mvsata_port *, bus_dma_tag_t,
169 bus_dmamap_t, void *);
170 static int mvsata_dma_bufload(struct mvsata_port *, int, void *, size_t, int);
171 static inline void mvsata_dma_bufunload(struct mvsata_port *, int, int);
172 #endif
174 static void mvsata_hreset_port(struct mvsata_port *);
175 static void mvsata_reset_port(struct mvsata_port *);
176 static void mvsata_reset_hc(struct mvsata_hc *);
177 #ifndef MVSATA_WITHOUTDMA
178 static void mvsata_softreset(struct mvsata_port *, int);
179 static void mvsata_edma_reset_qptr(struct mvsata_port *);
180 static inline void mvsata_edma_enable(struct mvsata_port *);
181 static int mvsata_edma_disable(struct mvsata_port *, int, int);
182 static void mvsata_edma_config(struct mvsata_port *, int);
184 static void mvsata_edma_setup_crqb(struct mvsata_port *, int, int,
185 struct ata_bio *);
186 #endif
187 static uint32_t mvsata_read_preamps_gen1(struct mvsata_port *);
188 static void mvsata_fix_phy_gen1(struct mvsata_port *);
189 static void mvsata_devconn_gen1(struct mvsata_port *);
191 static uint32_t mvsata_read_preamps_gen2(struct mvsata_port *);
192 static void mvsata_fix_phy_gen2(struct mvsata_port *);
193 #ifndef MVSATA_WITHOUTDMA
194 static void mvsata_edma_setup_crqb_gen2e(struct mvsata_port *, int, int,
195 struct ata_bio *);
197 #ifdef MVSATA_DEBUG
198 static void mvsata_print_crqb(struct mvsata_port *, int);
199 static void mvsata_print_crpb(struct mvsata_port *, int);
200 static void mvsata_print_eprd(struct mvsata_port *, int);
201 #endif
202 #endif
205 #ifndef MVSATA_WITHOUTDMA
206 struct ata_bustype mvsata_ata_bustype = {
207 SCSIPI_BUSTYPE_ATA,
208 mvsata_bio,
209 mvsata_reset_drive,
210 mvsata_reset_channel,
211 mvsata_exec_command,
212 ata_get_params,
213 mvsata_addref,
214 mvsata_delref,
215 mvsata_killpending
218 #if NATAPIBUS > 0
219 static const struct scsipi_bustype mvsata_atapi_bustype = {
220 SCSIPI_BUSTYPE_ATAPI,
221 atapi_scsipi_cmd,
222 atapi_interpret_sense,
223 atapi_print_addr,
224 mvsata_atapi_kill_pending,
226 #endif /* NATAPIBUS */
227 #endif
229 struct mvsata_product {
230 int model;
231 int hc;
232 int port;
233 int generation;
234 int flags;
235 } mvsata_products[] = {
236 { PCI_PRODUCT_MARVELL_88SX5040, 1, 4, gen1, 0 },
237 { PCI_PRODUCT_MARVELL_88SX5041, 1, 4, gen1, 0 },
238 { PCI_PRODUCT_MARVELL_88SX5080, 2, 4, gen1, 0 },
239 { PCI_PRODUCT_MARVELL_88SX5081, 2, 4, gen1, 0 },
240 { PCI_PRODUCT_MARVELL_88SX6040, 1, 4, gen2, 0 },
241 { PCI_PRODUCT_MARVELL_88SX6041, 1, 4, gen2, 0 },
242 { PCI_PRODUCT_MARVELL_88SX6042, 1, 4, gen2e, 0 },
243 { PCI_PRODUCT_MARVELL_88SX6080, 2, 4, gen2, MVSATA_FLAGS_PCIE },
244 { PCI_PRODUCT_MARVELL_88SX6081, 2, 4, gen2, MVSATA_FLAGS_PCIE },
245 { PCI_PRODUCT_ADP2_1420SA, 2, 4, gen2, MVSATA_FLAGS_PCIE },
246 { PCI_PRODUCT_MARVELL_88SX7042, 1, 4, gen2e, 0 },
247 { PCI_PRODUCT_ADP2_1430SA, 1, 4, gen2e, 0 },
248 { PCI_PRODUCT_TRIONES_ROCKETRAID_2310, 1, 4, gen2e, 0 },
249 { PCI_PRODUCT_MARVELL_88F5082, 1, 1, gen2e, 0 }, /* Orion */
250 { PCI_PRODUCT_MARVELL_88F5182, 1, 2, gen2e, 0 }, /* Orion */
251 { PCI_PRODUCT_MARVELL_88F6082, 1, 1, gen2e, 0 }, /* Orion */
252 #if 0 /* Marvell MV64660 Disco5: Product is 0x6490 ?? */
253 { PCI_PRODUCT_MARVELL_88F6490, 1, 1, gen2e, 0 }, /* Discover?*/
254 #endif
256 { -1, 0, 0, gen_unknown, 0 }
261 mvsata_attach(struct mvsata_softc *sc,
262 int (*mvsata_sreset)(struct mvsata_softc *),
263 int (*mvsata_misc_reset)(struct mvsata_softc *),
264 int read_pre_amps)
266 struct mvsata_hc *mvhc;
267 struct mvsata_port *mvport;
268 uint32_t (*read_preamps)(struct mvsata_port *) = NULL;
269 void (*_fix_phy)(struct mvsata_port *) = NULL;
270 #ifndef MVSATA_WITHOUTDMA
271 void (*edma_setup_crqb)
272 (struct mvsata_port *, int, int, struct ata_bio *) = NULL;
273 #endif
274 struct mvsata_product *product;
275 int hc, port, channel, i;
277 for (i = 0; mvsata_products[i].model != -1; i++)
278 if (sc->sc_model == mvsata_products[i].model)
279 break;
280 if (mvsata_products[i].model == -1) {
281 aprint_error_dev(MVSATA_DEV(sc), "unknown product 0x%04x\n",
282 sc->sc_model);
283 return EINVAL;
285 product = &mvsata_products[i];
286 aprint_normal_dev(MVSATA_DEV(sc), "Gen%s, %dhc, %dport/hc\n",
287 (product->generation == gen1) ? "I" :
288 ((product->generation == gen2) ? "II" : "IIe"),
289 product->hc, product->port);
292 switch (product->generation) {
293 case gen1:
294 mvsata_sreset = NULL;
295 read_pre_amps = 1; /* MUST */
296 read_preamps = mvsata_read_preamps_gen1;
297 _fix_phy = mvsata_fix_phy_gen1;
298 #ifndef MVSATA_WITHOUTDMA
299 edma_setup_crqb = mvsata_edma_setup_crqb;
300 #endif
301 break;
303 case gen2:
304 read_preamps = mvsata_read_preamps_gen2;
305 _fix_phy = mvsata_fix_phy_gen2;
306 #ifndef MVSATA_WITHOUTDMA
307 edma_setup_crqb = mvsata_edma_setup_crqb;
308 #endif
309 break;
311 case gen2e:
312 read_preamps = mvsata_read_preamps_gen2;
313 _fix_phy = mvsata_fix_phy_gen2;
314 #ifndef MVSATA_WITHOUTDMA
315 edma_setup_crqb = mvsata_edma_setup_crqb_gen2e;
316 #endif
317 break;
320 sc->sc_gen = mvsata_products[i].generation;
321 sc->sc_hc = mvsata_products[i].hc;
322 sc->sc_port = mvsata_products[i].port;
323 sc->sc_flags = mvsata_products[i].flags;
325 #ifdef MVSATA_WITHOUTDMA
326 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
327 #else
328 sc->sc_edma_setup_crqb = edma_setup_crqb;
329 sc->sc_wdcdev.sc_atac.atac_cap |=
330 (ATAC_CAP_DATA16 | ATAC_CAP_DMA | ATAC_CAP_UDMA);
331 #endif
332 sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
333 #ifndef MVSATA_WITHOUTDMA
334 sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;
335 sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
336 #else
337 sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
338 sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
339 #endif
340 sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_ata_channels;
341 sc->sc_wdcdev.sc_atac.atac_nchannels = sc->sc_hc * sc->sc_port;
342 #ifndef MVSATA_WITHOUTDMA
343 sc->sc_wdcdev.sc_atac.atac_bustype_ata = &mvsata_ata_bustype;
344 #if NATAPIBUS > 0
345 sc->sc_wdcdev.sc_atac.atac_atapibus_attach = mvsata_atapibus_attach;
346 #endif
347 #endif
348 sc->sc_wdcdev.sc_atac.atac_probe = wdc_sataprobe;
349 sc->sc_wdcdev.sc_atac.atac_set_modes = mvsata_setup_channel;
351 sc->sc_wdc_regs =
352 malloc(sizeof(struct wdc_regs) * product->hc * product->port,
353 M_DEVBUF, M_NOWAIT);
354 if (sc->sc_wdc_regs == NULL) {
355 aprint_error_dev(MVSATA_DEV(sc),
356 "can't allocate wdc regs memory\n");
357 return ENOMEM;
359 sc->sc_wdcdev.regs = sc->sc_wdc_regs;
361 for (hc = 0; hc < sc->sc_hc; hc++) {
362 mvhc = &sc->sc_hcs[hc];
363 mvhc->hc = hc;
364 mvhc->hc_sc = sc;
365 mvhc->hc_iot = sc->sc_iot;
366 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
367 hc * SATAHC_REGISTER_SIZE, SATAHC_REGISTER_SIZE,
368 &mvhc->hc_ioh)) {
369 aprint_error_dev(MVSATA_DEV(sc),
370 "can't subregion SATAHC %d registers\n", hc);
371 continue;
374 for (port = 0; port < sc->sc_port; port++)
375 if (mvsata_port_init(mvhc, port) == 0) {
376 int pre_amps;
378 mvport = mvhc->hc_ports[port];
379 pre_amps = read_pre_amps ?
380 read_preamps(mvport) : 0x00000720;
381 mvport->_fix_phy_param.pre_amps = pre_amps;
382 mvport->_fix_phy_param._fix_phy = _fix_phy;
384 if (!mvsata_sreset)
385 mvsata_reset_port(mvport);
388 if (!mvsata_sreset)
389 mvsata_reset_hc(mvhc);
391 if (mvsata_sreset)
392 mvsata_sreset(sc);
394 if (mvsata_misc_reset)
395 mvsata_misc_reset(sc);
397 for (hc = 0; hc < sc->sc_hc; hc++)
398 for (port = 0; port < sc->sc_port; port++) {
399 mvport = sc->sc_hcs[hc].hc_ports[port];
400 if (mvport == NULL)
401 continue;
402 if (mvsata_sreset)
403 mvport->_fix_phy_param._fix_phy(mvport);
405 for (channel = 0; channel < sc->sc_hc * sc->sc_port; channel++)
406 wdcattach(sc->sc_ata_channels[channel]);
408 return 0;
412 mvsata_intr(struct mvsata_hc *mvhc)
414 struct mvsata_softc *sc = mvhc->hc_sc;
415 struct mvsata_port *mvport;
416 uint32_t cause;
417 int port, handled = 0;
419 cause = MVSATA_HC_READ_4(mvhc, SATAHC_IC);
421 DPRINTFN(3, ("%s:%d: mvsata_intr: cause=0x%08x\n",
422 device_xname(MVSATA_DEV(sc)), mvhc->hc, cause));
424 if (cause & SATAHC_IC_SAINTCOAL)
425 MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, ~SATAHC_IC_SAINTCOAL);
426 cause &= ~SATAHC_IC_SAINTCOAL;
427 for (port = 0; port < sc->sc_port; port++) {
428 mvport = mvhc->hc_ports[port];
430 if (cause & SATAHC_IC_DONE(port)) {
431 #ifndef MVSATA_WITHOUTDMA
432 handled = mvsata_edma_handle(mvport, NULL);
433 #endif
434 MVSATA_HC_WRITE_4(mvhc, SATAHC_IC,
435 ~SATAHC_IC_DONE(port));
438 if (cause & SATAHC_IC_SADEVINTERRUPT(port)) {
439 wdcintr(&mvport->port_ata_channel);
440 MVSATA_HC_WRITE_4(mvhc, SATAHC_IC,
441 ~SATAHC_IC_SADEVINTERRUPT(port));
442 handled = 1;
446 return handled;
450 mvsata_error(struct mvsata_port *mvport)
452 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
453 uint32_t cause;
454 int handled = 0;
456 cause = MVSATA_EDMA_READ_4(mvport, EDMA_IEC);
457 MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, ~cause);
459 DPRINTFN(3, ("%s:%d:%d:"
460 " mvsata_error: cause=0x%08x, mask=0x%08x, status=0x%08x\n",
461 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
462 mvport->port, cause, MVSATA_EDMA_READ_4(mvport, EDMA_IEM),
463 MVSATA_EDMA_READ_4(mvport, EDMA_S)));
465 cause &= MVSATA_EDMA_READ_4(mvport, EDMA_IEM);
466 if (!cause)
467 return 0;
469 /* If PM connected, connect/disconnect interrupts storm could happen */
470 if (MVSATA_EDMA_READ_4(mvport, EDMA_IEC) &
471 (EDMA_IE_EDEVDIS | EDMA_IE_EDEVCON))
472 if (sc->sc_gen == gen2 || sc->sc_gen == gen2e) {
473 delay(20 * 1000);
474 cause = MVSATA_EDMA_READ_4(mvport, EDMA_IEC);
475 MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, ~cause);
478 if (cause & EDMA_IE_EDEVDIS)
479 aprint_normal("%s:%d:%d: device disconnect\n",
480 device_xname(MVSATA_DEV2(mvport)),
481 mvport->port_hc->hc, mvport->port);
482 if (cause & EDMA_IE_EDEVCON) {
483 if (sc->sc_gen == gen1)
484 mvsata_devconn_gen1(mvport);
486 DPRINTFN(3, (" device connected\n"));
487 handled = 1;
489 #ifndef MVSATA_WITHOUTDMA
490 if ((sc->sc_gen == gen1 && cause & EDMA_IE_ETRANSINT) ||
491 (sc->sc_gen != gen1 && cause & EDMA_IE_ESELFDIS)) {
492 switch (mvport->port_edmamode) {
493 case dma:
494 case queued:
495 case ncq:
496 mvsata_edma_reset_qptr(mvport);
497 mvsata_edma_enable(mvport);
498 if (cause & EDMA_IE_EDEVERR)
499 break;
501 /* FALLTHROUGH */
503 case nodma:
504 default:
505 aprint_error(
506 "%s:%d:%d: EDMA self disable happen 0x%x\n",
507 device_xname(MVSATA_DEV2(mvport)),
508 mvport->port_hc->hc, mvport->port, cause);
509 break;
511 handled = 1;
513 #endif
514 if (cause & EDMA_IE_ETRANSINT) {
515 /* hot plug the Port Multiplier */
516 aprint_normal("%s:%d:%d: detect Port Multiplier?\n",
517 device_xname(MVSATA_DEV2(mvport)),
518 mvport->port_hc->hc, mvport->port);
521 return handled;
526 * ATA callback entry points
529 #ifndef MVSATA_WITHOUTDMA
530 static int
531 mvsata_bio(struct ata_drive_datas *drvp, struct ata_bio *ata_bio)
533 struct ata_channel *chp = drvp->chnl_softc;
534 struct atac_softc *atac = chp->ch_atac;
535 struct ata_xfer *xfer;
537 DPRINTFN(1, ("%s:%d: mvsata_bio: drive=%d, blkno=%lld, bcount=%ld\n",
538 device_xname(atac->atac_dev), chp->ch_channel, drvp->drive,
539 ata_bio->blkno, ata_bio->bcount));
541 xfer = ata_get_xfer(ATAXF_NOSLEEP);
542 if (xfer == NULL)
543 return ATACMD_TRY_AGAIN;
544 if (atac->atac_cap & ATAC_CAP_NOIRQ)
545 ata_bio->flags |= ATA_POLL;
546 if (ata_bio->flags & ATA_POLL)
547 xfer->c_flags |= C_POLL;
548 if ((drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) &&
549 (ata_bio->flags & ATA_SINGLE) == 0)
550 xfer->c_flags |= C_DMA;
551 xfer->c_drive = drvp->drive;
552 xfer->c_cmd = ata_bio;
553 xfer->c_databuf = ata_bio->databuf;
554 xfer->c_bcount = ata_bio->bcount;
555 xfer->c_start = mvsata_bio_start;
556 xfer->c_intr = mvsata_bio_intr;
557 xfer->c_kill_xfer = mvsata_bio_kill_xfer;
558 ata_exec_xfer(chp, xfer);
559 return (ata_bio->flags & ATA_ITSDONE) ? ATACMD_COMPLETE : ATACMD_QUEUED;
562 static void
563 mvsata_reset_drive(struct ata_drive_datas *drvp, int flags)
565 struct ata_channel *chp = drvp->chnl_softc;
566 struct mvsata_port *mvport = (struct mvsata_port *)chp;
567 uint32_t edma_c;
569 edma_c = MVSATA_EDMA_READ_4(mvport, EDMA_CMD);
571 DPRINTF(("%s:%d: mvsata_reset_drive: drive=%d (EDMA %sactive)\n",
572 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drvp->drive,
573 (edma_c & EDMA_CMD_EENEDMA) ? "" : "not "));
575 if (edma_c & EDMA_CMD_EENEDMA)
576 mvsata_edma_disable(mvport, 10000, flags & AT_WAIT);
578 mvsata_softreset(mvport, flags & AT_WAIT);
580 if (edma_c & EDMA_CMD_EENEDMA) {
581 mvsata_edma_reset_qptr(mvport);
582 mvsata_edma_enable(mvport);
584 return;
587 static void
588 mvsata_reset_channel(struct ata_channel *chp, int flags)
590 struct mvsata_port *mvport = (struct mvsata_port *)chp;
591 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
592 struct ata_xfer *xfer;
593 uint32_t sstat, ctrl;
594 int i;
596 DPRINTF(("%s: mvsata_reset_channel: channel=%d\n",
597 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel));
599 mvsata_hreset_port(mvport);
600 sstat = sata_reset_interface(chp, mvport->port_iot,
601 mvport->port_sata_scontrol, mvport->port_sata_sstatus);
603 if (flags & AT_WAIT && sstat == SStatus_DET_DEV_NE &&
604 sc->sc_gen != gen1) {
605 /* Downgrade to GenI */
606 const uint32_t val = SControl_IPM_NONE | SControl_SPD_ANY |
607 SControl_DET_DISABLE;
609 MVSATA_EDMA_WRITE_4(mvport, mvport->port_sata_scontrol, val);
611 ctrl = MVSATA_EDMA_READ_4(mvport, SATA_SATAICFG);
612 ctrl &= ~(1 << 17); /* Disable GenII */
613 MVSATA_EDMA_WRITE_4(mvport, SATA_SATAICFG, ctrl);
615 mvsata_hreset_port(mvport);
616 sata_reset_interface(chp, mvport->port_iot,
617 mvport->port_sata_scontrol, mvport->port_sata_sstatus);
620 for (i = 0; MVSATA_EDMAQ_LEN; i++) {
621 xfer = mvport->port_reqtbl[i].xfer;
622 if (xfer == NULL)
623 continue;
624 chp->ch_queue->active_xfer = xfer;
625 xfer->c_kill_xfer(chp, xfer, KILL_RESET);
628 mvsata_edma_config(mvport, mvport->port_edmamode);
629 mvsata_edma_reset_qptr(mvport);
630 mvsata_edma_enable(mvport);
631 return;
635 static int
636 mvsata_exec_command(struct ata_drive_datas *drvp, struct ata_command *ata_c)
638 struct ata_channel *chp = drvp->chnl_softc;
639 #ifdef MVSATA_DEBUG
640 struct mvsata_port *mvport = (struct mvsata_port *)chp;
641 #endif
642 struct ata_xfer *xfer;
643 int rv, s;
645 DPRINTFN(1, ("%s:%d: mvsata_exec_command: drive=%d, bcount=%d,"
646 " r_command=0x%x, r_head=0x%x, r_cyl=0x%x, r_sector=0x%x,"
647 " r_count=0x%x, r_features=0x%x\n",
648 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel,
649 drvp->drive, ata_c->bcount, ata_c->r_command, ata_c->r_head,
650 ata_c->r_cyl, ata_c->r_sector, ata_c->r_count, ata_c->r_features));
652 xfer = ata_get_xfer(ata_c->flags & AT_WAIT ? ATAXF_CANSLEEP :
653 ATAXF_NOSLEEP);
654 if (xfer == NULL)
655 return ATACMD_TRY_AGAIN;
656 if (ata_c->flags & AT_POLL)
657 xfer->c_flags |= C_POLL;
658 if (ata_c->flags & AT_WAIT)
659 xfer->c_flags |= C_WAIT;
660 xfer->c_drive = drvp->drive;
661 xfer->c_databuf = ata_c->data;
662 xfer->c_bcount = ata_c->bcount;
663 xfer->c_cmd = ata_c;
664 xfer->c_start = mvsata_wdc_cmd_start;
665 xfer->c_intr = mvsata_wdc_cmd_intr;
666 xfer->c_kill_xfer = mvsata_wdc_cmd_kill_xfer;
667 s = splbio();
668 ata_exec_xfer(chp, xfer);
669 #ifdef DIAGNOSTIC
670 if ((ata_c->flags & AT_POLL) != 0 &&
671 (ata_c->flags & AT_DONE) == 0)
672 panic("mvsata_exec_command: polled command not done");
673 #endif
674 if (ata_c->flags & AT_DONE)
675 rv = ATACMD_COMPLETE;
676 else {
677 if (ata_c->flags & AT_WAIT) {
678 while ((ata_c->flags & AT_DONE) == 0)
679 tsleep(ata_c, PRIBIO, "mvsatacmd", 0);
680 rv = ATACMD_COMPLETE;
681 } else
682 rv = ATACMD_QUEUED;
684 splx(s);
685 return rv;
/* No per-open reference counting is needed for this controller. */
static int
mvsata_addref(struct ata_drive_datas *drvp)
{

	return 0;
}
/* Counterpart of mvsata_addref; intentionally a no-op. */
static void
mvsata_delref(struct ata_drive_datas *drvp)
{

	return;
}
/* Nothing per-drive to cancel here; intentionally a no-op. */
static void
mvsata_killpending(struct ata_drive_datas *drvp)
{

	return;
}
709 #if NATAPIBUS > 0
710 static void
711 mvsata_atapibus_attach(struct atabus_softc *ata_sc)
713 struct ata_channel *chp = ata_sc->sc_chan;
714 struct atac_softc *atac = chp->ch_atac;
715 struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
716 struct scsipi_channel *chan = &chp->ch_atapi_channel;
719 * Fill in the scsipi_adapter.
721 adapt->adapt_dev = atac->atac_dev;
722 adapt->adapt_nchannels = atac->atac_nchannels;
723 adapt->adapt_request = mvsata_atapi_scsipi_request;
724 adapt->adapt_minphys = mvsata_atapi_minphys;
725 atac->atac_atapi_adapter.atapi_probe_device = mvsata_atapi_probe_device;
728 * Fill in the scsipi_channel.
730 memset(chan, 0, sizeof(*chan));
731 chan->chan_adapter = adapt;
732 chan->chan_bustype = &mvsata_atapi_bustype;
733 chan->chan_channel = chp->ch_channel;
734 chan->chan_flags = SCSIPI_CHAN_OPENINGS;
735 chan->chan_openings = 1;
736 chan->chan_max_periph = 1;
737 chan->chan_ntargets = 1;
738 chan->chan_nluns = 1;
740 chp->atapibus =
741 config_found_ia(ata_sc->sc_dev, "atapi", chan, atapiprint);
744 static void
745 mvsata_atapi_scsipi_request(struct scsipi_channel *chan,
746 scsipi_adapter_req_t req, void *arg)
748 struct scsipi_adapter *adapt = chan->chan_adapter;
749 struct scsipi_periph *periph;
750 struct scsipi_xfer *sc_xfer;
751 struct mvsata_softc *sc = device_private(adapt->adapt_dev);
752 struct atac_softc *atac = &sc->sc_wdcdev.sc_atac;
753 struct ata_xfer *xfer;
754 int channel = chan->chan_channel;
755 int drive, s;
757 switch (req) {
758 case ADAPTER_REQ_RUN_XFER:
759 sc_xfer = arg;
760 periph = sc_xfer->xs_periph;
761 drive = periph->periph_target;
763 if (!device_is_active(atac->atac_dev)) {
764 sc_xfer->error = XS_DRIVER_STUFFUP;
765 scsipi_done(sc_xfer);
766 return;
768 xfer = ata_get_xfer(ATAXF_NOSLEEP);
769 if (xfer == NULL) {
770 sc_xfer->error = XS_RESOURCE_SHORTAGE;
771 scsipi_done(sc_xfer);
772 return;
775 if (sc_xfer->xs_control & XS_CTL_POLL)
776 xfer->c_flags |= C_POLL;
777 xfer->c_drive = drive;
778 xfer->c_flags |= C_ATAPI;
779 xfer->c_cmd = sc_xfer;
780 xfer->c_databuf = sc_xfer->data;
781 xfer->c_bcount = sc_xfer->datalen;
782 xfer->c_start = mvsata_atapi_start;
783 xfer->c_intr = mvsata_atapi_intr;
784 xfer->c_kill_xfer = mvsata_atapi_kill_xfer;
785 xfer->c_dscpoll = 0;
786 s = splbio();
787 ata_exec_xfer(atac->atac_channels[channel], xfer);
788 #ifdef DIAGNOSTIC
789 if ((sc_xfer->xs_control & XS_CTL_POLL) != 0 &&
790 (sc_xfer->xs_status & XS_STS_DONE) == 0)
791 panic("mvsata_atapi_scsipi_request:"
792 " polled command not done");
793 #endif
794 splx(s);
795 return;
797 default:
798 /* Not supported, nothing to do. */
803 static void
804 mvsata_atapi_minphys(struct buf *bp)
807 if (bp->b_bcount > MAXPHYS)
808 bp->b_bcount = MAXPHYS;
809 minphys(bp);
812 static void
813 mvsata_atapi_probe_device(struct atapibus_softc *sc, int target)
815 struct scsipi_channel *chan = sc->sc_channel;
816 struct scsipi_periph *periph;
817 struct ataparams ids;
818 struct ataparams *id = &ids;
819 struct mvsata_softc *mvc =
820 device_private(chan->chan_adapter->adapt_dev);
821 struct atac_softc *atac = &mvc->sc_wdcdev.sc_atac;
822 struct ata_channel *chp = atac->atac_channels[chan->chan_channel];
823 struct ata_drive_datas *drvp = &chp->ch_drive[target];
824 struct scsipibus_attach_args sa;
825 char serial_number[21], model[41], firmware_revision[9];
826 int s;
828 /* skip if already attached */
829 if (scsipi_lookup_periph(chan, target, 0) != NULL)
830 return;
832 /* if no ATAPI device detected at attach time, skip */
833 if ((drvp->drive_flags & DRIVE_ATAPI) == 0) {
834 DPRINTF(("%s:%d: mvsata_atapi_probe_device:"
835 " drive %d not present\n",
836 device_xname(atac->atac_dev), chp->ch_channel, target));
837 return;
840 /* Some ATAPI devices need a bit more time after software reset. */
841 delay(5000);
842 if (ata_get_params(drvp, AT_WAIT, id) == 0) {
843 #ifdef ATAPI_DEBUG_PROBE
844 log(LOG_DEBUG, "%s:%d: drive %d: cmdsz 0x%x drqtype 0x%x\n",
845 device_xname(atac->atac_dev), chp->ch_channel, target,
846 id->atap_config & ATAPI_CFG_CMD_MASK,
847 id->atap_config & ATAPI_CFG_DRQ_MASK);
848 #endif
849 periph = scsipi_alloc_periph(M_NOWAIT);
850 if (periph == NULL) {
851 aprint_error_dev(atac->atac_dev,
852 "unable to allocate periph"
853 " for channel %d drive %d\n",
854 chp->ch_channel, target);
855 return;
857 periph->periph_dev = NULL;
858 periph->periph_channel = chan;
859 periph->periph_switch = &atapi_probe_periphsw;
860 periph->periph_target = target;
861 periph->periph_lun = 0;
862 periph->periph_quirks = PQUIRK_ONLYBIG;
864 #ifdef SCSIPI_DEBUG
865 if (SCSIPI_DEBUG_TYPE == SCSIPI_BUSTYPE_ATAPI &&
866 SCSIPI_DEBUG_TARGET == target)
867 periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
868 #endif
869 periph->periph_type = ATAPI_CFG_TYPE(id->atap_config);
870 if (id->atap_config & ATAPI_CFG_REMOV)
871 periph->periph_flags |= PERIPH_REMOVABLE;
872 if (periph->periph_type == T_SEQUENTIAL) {
873 s = splbio();
874 drvp->drive_flags |= DRIVE_ATAPIST;
875 splx(s);
878 sa.sa_periph = periph;
879 sa.sa_inqbuf.type = ATAPI_CFG_TYPE(id->atap_config);
880 sa.sa_inqbuf.removable = id->atap_config & ATAPI_CFG_REMOV ?
881 T_REMOV : T_FIXED;
882 scsipi_strvis((u_char *)model, 40, id->atap_model, 40);
883 scsipi_strvis((u_char *)serial_number, 20, id->atap_serial, 20);
884 scsipi_strvis((u_char *)firmware_revision, 8, id->atap_revision,
886 sa.sa_inqbuf.vendor = model;
887 sa.sa_inqbuf.product = serial_number;
888 sa.sa_inqbuf.revision = firmware_revision;
891 * Determine the operating mode capabilities of the device.
893 if ((id->atap_config & ATAPI_CFG_CMD_MASK) == ATAPI_CFG_CMD_16)
894 periph->periph_cap |= PERIPH_CAP_CMD16;
895 /* XXX This is gross. */
896 periph->periph_cap |= (id->atap_config & ATAPI_CFG_DRQ_MASK);
898 drvp->drv_softc = atapi_probe_device(sc, target, periph, &sa);
900 if (drvp->drv_softc)
901 ata_probe_caps(drvp);
902 else {
903 s = splbio();
904 drvp->drive_flags &= ~DRIVE_ATAPI;
905 splx(s);
907 } else {
908 DPRINTF(("%s:%d: mvsata_atapi_probe_device:"
909 " ATAPI_IDENTIFY_DEVICE failed for drive %d: error 0x%x\n",
910 device_xname(atac->atac_dev), chp->ch_channel, target,
911 chp->ch_error));
912 s = splbio();
913 drvp->drive_flags &= ~DRIVE_ATAPI;
914 splx(s);
919 * Kill off all pending xfers for a periph.
921 * Must be called at splbio().
923 static void
924 mvsata_atapi_kill_pending(struct scsipi_periph *periph)
926 struct atac_softc *atac =
927 device_private(periph->periph_channel->chan_adapter->adapt_dev);
928 struct ata_channel *chp =
929 atac->atac_channels[periph->periph_channel->chan_channel];
931 ata_kill_pending(&chp->ch_drive[periph->periph_target]);
933 #endif /* NATAPIBUS > 0 */
934 #endif /* MVSATA_WITHOUTDMA */
938 * mvsata_setup_channel()
939 * Setup EDMA registers and prepare/purge DMA resources.
940 * We assuming already stopped the EDMA.
/*
 * mvsata_setup_channel:
 *	Per-channel setup: decide whether the EDMA engine can be used
 *	(some drive must be an ATA disk in UDMA or DMA mode) and prepare
 *	or purge the CRQB/CRPB/ePRD DMA resources to match that decision.
 */
942 static void
943 mvsata_setup_channel(struct ata_channel *chp)
945 #if !defined(MVSATA_WITHOUTDMA) || defined(MVSATA_DEBUG)
946 struct mvsata_port *mvport = (struct mvsata_port *)chp;
947 #endif
948 struct ata_drive_datas *drvp;
949 uint32_t edma_mode;
950 int drive, s;
951 #ifndef MVSATA_WITHOUTDMA
952 int i;
953 const int crqb_size = sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN;
954 const int crpb_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN;
955 const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN;
956 #endif
958 DPRINTF(("%s:%d: mvsata_setup_channel: ",
959 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel));
/* Scan the drives; select EDMA only if an ATA disk does UDMA or DMA. */
961 edma_mode = nodma;
962 for (drive = 0; drive < chp->ch_ndrive; drive++) {
963 drvp = &chp->ch_drive[drive];
965 /* If no drive, skip */
966 if (!(drvp->drive_flags & DRIVE))
967 continue;
969 if (drvp->drive_flags & DRIVE_UDMA) {
970 /* use Ultra/DMA */
971 s = splbio();
972 drvp->drive_flags &= ~DRIVE_DMA;
973 splx(s);
976 if (drvp->drive_flags & (DRIVE_UDMA | DRIVE_DMA))
977 if (drvp->drive_flags & DRIVE_ATA)
978 edma_mode = dma;
981 DPRINTF(("EDMA %sactive mode\n", (edma_mode == nodma) ? "not " : ""));
983 #ifndef MVSATA_WITHOUTDMA
/* No DMA-capable drive: release any EDMA resources and run PIO only. */
984 if (edma_mode == nodma) {
985 no_edma:
986 if (mvport->port_crqb != NULL)
987 mvsata_edma_resource_purge(mvport, mvport->port_dmat,
988 mvport->port_crqb_dmamap, mvport->port_crqb)
989 if (mvport->port_crpb != NULL)
990 mvsata_edma_resource_purge(mvport, mvport->port_dmat,
991 mvport->port_crpb_dmamap, mvport->port_crpb);
992 if (mvport->port_eprd != NULL)
993 mvsata_edma_resource_purge(mvport, mvport->port_dmat,
994 mvport->port_eprd_dmamap, mvport->port_eprd);
996 return;
/* First use: allocate request/response queues and the ePRD table. */
999 if (mvport->port_crqb == NULL)
1000 mvport->port_crqb = mvsata_edma_resource_prepare(mvport,
1001 mvport->port_dmat, &mvport->port_crqb_dmamap, crqb_size, 1);
1002 if (mvport->port_crpb == NULL)
1003 mvport->port_crpb = mvsata_edma_resource_prepare(mvport,
1004 mvport->port_dmat, &mvport->port_crpb_dmamap, crpb_size, 0);
1005 if (mvport->port_eprd == NULL) {
1006 mvport->port_eprd = mvsata_edma_resource_prepare(mvport,
1007 mvport->port_dmat, &mvport->port_eprd_dmamap, eprd_buf_size,
/* Carve the ePRD buffer into one fixed-size slot per queue entry. */
1009 for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
1010 mvport->port_reqtbl[i].eprd_offset =
1011 i * MVSATA_EPRD_MAX_SIZE;
1012 mvport->port_reqtbl[i].eprd = mvport->port_eprd +
1013 i * MVSATA_EPRD_MAX_SIZE / sizeof(struct eprd);
/* Any allocation failed: strip DMA flags from all drives, fall back. */
1017 if (mvport->port_crqb == NULL || mvport->port_crpb == NULL ||
1018 mvport->port_eprd == NULL) {
1019 aprint_error_dev(MVSATA_DEV2(mvport),
1020 "channel %d: can't use EDMA\n", chp->ch_channel);
1021 s = splbio();
1022 for (drive = 0; drive < chp->ch_ndrive; drive++) {
1023 drvp = &chp->ch_drive[drive];
1025 /* If no drive, skip */
1026 if (!(drvp->drive_flags & DRIVE))
1027 continue;
1029 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
1031 splx(s);
1032 goto no_edma;
/* Program the EDMA mode and (re)start the engine. */
1035 mvsata_edma_config(mvport, edma_mode);
1036 mvsata_edma_reset_qptr(mvport);
1037 mvsata_edma_enable(mvport);
1038 #endif
1041 #ifndef MVSATA_WITHOUTDMA
/*
 * mvsata_bio_start:
 *	Start (or continue) a block-I/O transfer on this channel.
 *	DMA-capable requests are queued to the EDMA engine; otherwise the
 *	command is issued via the shadow task-file registers in PIO mode.
 *	Polled transfers loop via the "again:" label until done.
 */
1042 static void
1043 mvsata_bio_start(struct ata_channel *chp, struct ata_xfer *xfer)
1045 struct mvsata_port *mvport = (struct mvsata_port *)chp;
1046 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
1047 struct atac_softc *atac = chp->ch_atac;
1048 struct wdc_softc *wdc = CHAN_TO_WDC(chp);
1049 struct ata_bio *ata_bio = xfer->c_cmd;
1050 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
1051 int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
1052 u_int16_t cyl;
1053 u_int8_t head, sect, cmd = 0;
1054 int nblks, error;
1056 DPRINTFN(2, ("%s:%d: mvsata_bio_start: drive=%d\n",
1057 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));
1059 if (xfer->c_flags & C_DMA)
1060 if (drvp->n_xfers <= NXFER)
1061 drvp->n_xfers++;
1063 again:
/*
1066 * When starting a multi-sector transfer, or doing single-sector
1067 * transfers...
 */
1069 if (xfer->c_skip == 0 || (ata_bio->flags & ATA_SINGLE) != 0) {
1070 if (ata_bio->flags & ATA_SINGLE)
1071 nblks = 1;
1072 else
1073 nblks = xfer->c_bcount / ata_bio->lp->d_secsize;
1074 /* Check for bad sectors and adjust transfer, if necessary. */
1075 if ((ata_bio->lp->d_flags & D_BADSECT) != 0) {
1076 long blkdiff;
1077 int i;
1079 for (i = 0; (blkdiff = ata_bio->badsect[i]) != -1;
1080 i++) {
1081 blkdiff -= ata_bio->blkno;
1082 if (blkdiff < 0)
1083 continue;
1084 if (blkdiff == 0)
1085 /* Replace current block of transfer. */
1086 ata_bio->blkno =
1087 ata_bio->lp->d_secperunit -
1088 ata_bio->lp->d_nsectors - i - 1;
1089 if (blkdiff < nblks) {
1090 /* Bad block inside transfer. */
1091 ata_bio->flags |= ATA_SINGLE;
1092 nblks = 1;
1094 break;
1096 /* Transfer is okay now. */
/* DMA path: hand the whole request to the EDMA queue. */
1098 if (xfer->c_flags & C_DMA) {
1099 ata_bio->nblks = nblks;
1100 ata_bio->nbytes = xfer->c_bcount;
1102 if (xfer->c_flags & C_POLL)
1103 sc->sc_enable_intr(mvport, 0 /*off*/);
1104 error = mvsata_edma_inqueue(mvport, ata_bio,
1105 (char *)xfer->c_databuf + xfer->c_skip);
1106 if (error) {
1107 if (error == EINVAL) {
/*
1109 * We can't do DMA on this transfer
1110 * for some reason. Fall back to
1111 * PIO.
 */
1113 xfer->c_flags &= ~C_DMA;
1114 error = 0;
1115 goto do_pio;
1117 if (error == EBUSY) {
1118 aprint_error_dev(atac->atac_dev,
1119 "channel %d: EDMA Queue full\n",
1120 chp->ch_channel);
/*
1122 * XXXX: Perhaps, after it waits for
1123 * a while, it is necessary to call
1124 * bio_start again.
 */
1127 ata_bio->error = ERR_DMA;
1128 ata_bio->r_error = 0;
1129 mvsata_bio_done(chp, xfer);
1130 return;
1132 chp->ch_flags |= ATACH_DMA_WAIT;
1133 /* start timeout machinery */
1134 if ((xfer->c_flags & C_POLL) == 0)
1135 callout_reset(&chp->ch_callout,
1136 ATA_DELAY / 1000 * hz,
1137 mvsata_edma_timeout, xfer);
1138 /* wait for irq */
1139 goto intr;
1140 } /* else not DMA */
1141 do_pio:
/* PIO path: compute CHS/LBA task-file values for this chunk. */
1142 if (ata_bio->flags & ATA_LBA48) {
1143 sect = 0;
1144 cyl = 0;
1145 head = 0;
1146 } else if (ata_bio->flags & ATA_LBA) {
1147 sect = (ata_bio->blkno >> 0) & 0xff;
1148 cyl = (ata_bio->blkno >> 8) & 0xffff;
1149 head = (ata_bio->blkno >> 24) & 0x0f;
1150 head |= WDSD_LBA;
1151 } else {
1152 int blkno = ata_bio->blkno;
1153 sect = blkno % ata_bio->lp->d_nsectors;
1154 sect++; /* Sectors begin with 1, not 0. */
1155 blkno /= ata_bio->lp->d_nsectors;
1156 head = blkno % ata_bio->lp->d_ntracks;
1157 blkno /= ata_bio->lp->d_ntracks;
1158 cyl = blkno;
1159 head |= WDSD_CHS;
1161 ata_bio->nblks = min(nblks, ata_bio->multi);
1162 ata_bio->nbytes = ata_bio->nblks * ata_bio->lp->d_secsize;
1163 KASSERT(nblks == 1 || (ata_bio->flags & ATA_SINGLE) == 0);
1164 if (ata_bio->nblks > 1)
1165 cmd = (ata_bio->flags & ATA_READ) ?
1166 WDCC_READMULTI : WDCC_WRITEMULTI;
1167 else
1168 cmd = (ata_bio->flags & ATA_READ) ?
1169 WDCC_READ : WDCC_WRITE;
1171 /* EDMA disable, if enabled this channel. */
1172 if (mvport->port_edmamode != nodma)
1173 mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);
1175 /* Do control operations specially. */
1176 if (__predict_false(drvp->state < READY)) {
/*
1178 * Actually, we want to be careful not to mess with
1179 * the control state if the device is currently busy,
1180 * but we can assume that we never get to this point
1181 * if that's the case.
 */
/*
1184 * If it's not a polled command, we need the kernel
1185 * thread
 */
1187 if ((xfer->c_flags & C_POLL) == 0 && cpu_intr_p()) {
1188 chp->ch_queue->queue_freeze++;
1189 wakeup(&chp->ch_thread);
1190 return;
1192 if (mvsata_bio_ready(mvport, ata_bio, xfer->c_drive,
1193 (xfer->c_flags & C_POLL) ? AT_POLL : 0) != 0) {
1194 mvsata_bio_done(chp, xfer);
1195 return;
1199 /* Initiate command! */
1200 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
1201 switch(wdc_wait_for_ready(chp, ATA_DELAY, wait_flags)) {
1202 case WDCWAIT_OK:
1203 break;
1204 case WDCWAIT_TOUT:
1205 goto timeout;
1206 case WDCWAIT_THR:
1207 return;
1209 if (ata_bio->flags & ATA_LBA48)
1210 wdccommandext(chp, xfer->c_drive, atacmd_to48(cmd),
1211 (u_int64_t)ata_bio->blkno, nblks);
1212 else
1213 wdccommand(chp, xfer->c_drive, cmd, cyl,
1214 head, sect, nblks,
1215 (ata_bio->lp->d_type == DTYPE_ST506) ?
1216 ata_bio->lp->d_precompcyl / 4 : 0);
1218 /* start timeout machinery */
1219 if ((xfer->c_flags & C_POLL) == 0)
1220 callout_reset(&chp->ch_callout,
1221 ATA_DELAY / 1000 * hz, wdctimeout, chp);
1222 } else if (ata_bio->nblks > 1) {
1223 /* The number of blocks in the last stretch may be smaller. */
1224 nblks = xfer->c_bcount / ata_bio->lp->d_secsize;
1225 if (ata_bio->nblks > nblks) {
1226 ata_bio->nblks = nblks;
1227 ata_bio->nbytes = xfer->c_bcount;
1230 /* If this was a write and not using DMA, push the data. */
1231 if ((ata_bio->flags & ATA_READ) == 0) {
/*
1233 * we have to busy-wait here, we can't rely on running in
1234 * thread context.
 */
1236 if (wdc_wait_for_drq(chp, ATA_DELAY, AT_POLL) != 0) {
1237 aprint_error_dev(atac->atac_dev,
1238 "channel %d: drive %d timeout waiting for DRQ,"
1239 " st=0x%02x, err=0x%02x\n",
1240 chp->ch_channel, xfer->c_drive, chp->ch_status,
1241 chp->ch_error);
1242 ata_bio->error = TIMEOUT;
1243 mvsata_bio_done(chp, xfer);
1244 return;
1246 if (chp->ch_status & WDCS_ERR) {
1247 ata_bio->error = ERROR;
1248 ata_bio->r_error = chp->ch_error;
1249 mvsata_bio_done(chp, xfer);
1250 return;
1253 wdc->dataout_pio(chp, drvp->drive_flags,
1254 (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes);
1257 intr:
1258 /* Wait for IRQ (either real or polled) */
1259 if ((ata_bio->flags & ATA_POLL) == 0) {
1260 chp->ch_flags |= ATACH_IRQ_WAIT;
1262 #if 1 /* XXXXX: Marvell SATA and mvsata(4) can accept next xfer. */
1263 chp->ch_queue->active_xfer = NULL;
1264 #endif
1265 } else {
1266 /* Wait for at least 400ns for status bit to be valid */
1267 delay(1);
1268 if (chp->ch_flags & ATACH_DMA_WAIT) {
1269 mvsata_edma_wait(mvport, xfer, ATA_DELAY);
1270 sc->sc_enable_intr(mvport, 1 /*on*/);
1271 chp->ch_flags &= ~ATACH_DMA_WAIT;
1273 mvsata_bio_intr(chp, xfer, 0);
1274 if ((ata_bio->flags & ATA_ITSDONE) == 0)
1275 goto again;
1277 return;
1279 timeout:
1280 aprint_error_dev(atac->atac_dev,
1281 "channel %d: drive %d not ready, st=0x%02x, err=0x%02x\n",
1282 chp->ch_channel, xfer->c_drive, chp->ch_status, chp->ch_error);
1283 ata_bio->error = TIMEOUT;
1284 mvsata_bio_done(chp, xfer);
1285 return;
/*
 * mvsata_bio_intr:
 *	Completion handler (interrupt or polled) for a block-I/O transfer.
 *	Returns 0 when the IRQ was not for this xfer, 1 otherwise.
 */
1288 static int
1289 mvsata_bio_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
1291 struct atac_softc *atac = chp->ch_atac;
1292 struct wdc_softc *wdc = CHAN_TO_WDC(chp);
1293 struct ata_bio *ata_bio = xfer->c_cmd;
1294 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
1296 DPRINTFN(2, ("%s:%d: mvsata_bio_intr: drive=%d\n",
1297 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));
1299 /* Is it not a transfer, but a control operation? */
1300 if (!(xfer->c_flags & C_DMA) && drvp->state < READY) {
1301 aprint_error_dev(atac->atac_dev,
1302 "channel %d: drive %d bad state %d in mvsata_bio_intr\n",
1303 chp->ch_channel, xfer->c_drive, drvp->state);
1304 panic("mvsata_bio_intr: bad state");
/*
1308 * if we missed an interrupt transfer, reset and restart.
1309 * Don't try to continue transfer, we may have missed cycles.
 */
1311 if (xfer->c_flags & C_TIMEOU) {
1312 ata_bio->error = TIMEOUT;
1313 mvsata_bio_done(chp, xfer);
1314 return 1;
1317 /* Ack interrupt done by wdc_wait_for_unbusy */
1318 if (!(xfer->c_flags & C_DMA) &&
1319 (wdc_wait_for_unbusy(chp, (irq == 0) ? ATA_DELAY : 0, AT_POLL)
1320 == WDCWAIT_TOUT)) {
1321 if (irq && (xfer->c_flags & C_TIMEOU) == 0)
1322 return 0; /* IRQ was not for us */
1323 aprint_error_dev(atac->atac_dev,
1324 "channel %d: drive %d timeout, c_bcount=%d, c_skip%d\n",
1325 chp->ch_channel, xfer->c_drive, xfer->c_bcount,
1326 xfer->c_skip);
1327 ata_bio->error = TIMEOUT;
1328 mvsata_bio_done(chp, xfer);
1329 return 1;
/* DMA: the EDMA engine already set ata_bio->error for us. */
1332 if (xfer->c_flags & C_DMA) {
1333 if (ata_bio->error == NOERROR)
1334 goto end;
1335 if (ata_bio->error == ERR_DMA)
1336 ata_dmaerr(drvp,
1337 (xfer->c_flags & C_POLL) ? AT_POLL : 0);
1340 /* if we had an error, end */
1341 if (ata_bio->error != NOERROR) {
1342 mvsata_bio_done(chp, xfer);
1343 return 1;
1346 /* If this was a read and not using DMA, fetch the data. */
1347 if ((ata_bio->flags & ATA_READ) != 0) {
1348 if ((chp->ch_status & WDCS_DRQ) != WDCS_DRQ) {
1349 aprint_error_dev(atac->atac_dev,
1350 "channel %d: drive %d read intr before drq\n",
1351 chp->ch_channel, xfer->c_drive);
1352 ata_bio->error = TIMEOUT;
1353 mvsata_bio_done(chp, xfer);
1354 return 1;
1356 wdc->datain_pio(chp, drvp->drive_flags,
1357 (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes);
1360 end:
/* Account for the chunk just moved and advance the transfer. */
1361 ata_bio->blkno += ata_bio->nblks;
1362 ata_bio->blkdone += ata_bio->nblks;
1363 xfer->c_skip += ata_bio->nbytes;
1364 xfer->c_bcount -= ata_bio->nbytes;
1365 /* See if this transfer is complete. */
1366 if (xfer->c_bcount > 0) {
1367 if ((ata_bio->flags & ATA_POLL) == 0)
1368 /* Start the next operation */
1369 mvsata_bio_start(chp, xfer);
1370 else
1371 /* Let mvsata_bio_start do the loop */
1372 return 1;
1373 } else { /* Done with this transfer */
1374 ata_bio->error = NOERROR;
1375 mvsata_bio_done(chp, xfer);
1377 return 1;
/*
 * mvsata_bio_kill_xfer:
 *	Abort a queued block-I/O transfer because the drive is gone
 *	(KILL_GONE) or the channel was reset (KILL_RESET); report the
 *	corresponding error to the upper layer via drv_done.
 */
1380 static void
1381 mvsata_bio_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
1383 struct mvsata_port *mvport = (struct mvsata_port *)chp;
1384 struct atac_softc *atac = chp->ch_atac;
1385 struct ata_bio *ata_bio = xfer->c_cmd;
1386 int drive = xfer->c_drive;
1388 DPRINTFN(2, ("%s:%d: mvsata_bio_kill_xfer: drive=%d\n",
1389 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));
1391 /* EDMA restart, if enabled */
1392 if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode != nodma) {
1393 mvsata_edma_reset_qptr(mvport);
1394 mvsata_edma_enable(mvport);
1397 ata_free_xfer(chp, xfer);
/* Tell the upper layer why this request died. */
1399 ata_bio->flags |= ATA_ITSDONE;
1400 switch (reason) {
1401 case KILL_GONE:
1402 ata_bio->error = ERR_NODEV;
1403 break;
1404 case KILL_RESET:
1405 ata_bio->error = ERR_RESET;
1406 break;
1407 default:
1408 aprint_error_dev(atac->atac_dev,
1409 "mvsata_bio_kill_xfer: unknown reason %d\n", reason);
1410 panic("mvsata_bio_kill_xfer");
1412 ata_bio->r_error = WDCE_ABRT;
1413 (*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc);
/*
 * mvsata_bio_done:
 *	Finish a block-I/O transfer: stop the timeout callout, restart
 *	EDMA if needed, return residual byte count, free the xfer, notify
 *	the drive's completion callback and kick the queue again.
 */
1416 static void
1417 mvsata_bio_done(struct ata_channel *chp, struct ata_xfer *xfer)
1419 struct mvsata_port *mvport = (struct mvsata_port *)chp;
1420 struct ata_bio *ata_bio = xfer->c_cmd;
1421 int drive = xfer->c_drive;
1423 DPRINTFN(2, ("%s:%d: mvsata_bio_done: drive=%d, flags=0x%x\n",
1424 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive,
1425 (u_int)xfer->c_flags));
1427 callout_stop(&chp->ch_callout);
1429 /* EDMA restart, if enabled */
1430 if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode != nodma) {
1431 mvsata_edma_reset_qptr(mvport);
1432 mvsata_edma_enable(mvport);
1435 /* feed back residual bcount to our caller */
1436 ata_bio->bcount = xfer->c_bcount;
1438 /* mark controller inactive and free xfer */
1439 chp->ch_queue->active_xfer = NULL;
1440 ata_free_xfer(chp, xfer);
/* A pending drain takes priority: report the device as gone. */
1442 if (chp->ch_drive[drive].drive_flags & DRIVE_WAITDRAIN) {
1443 ata_bio->error = ERR_NODEV;
1444 chp->ch_drive[drive].drive_flags &= ~DRIVE_WAITDRAIN;
1445 wakeup(&chp->ch_queue->active_xfer);
1447 ata_bio->flags |= ATA_ITSDONE;
1448 (*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc);
1449 atastart(chp);
/*
 * mvsata_bio_ready:
 *	Bring a drive to the READY state before issuing a transfer:
 *	recalibrate, set PIO/DMA/UDMA modes, program the geometry and the
 *	multi-sector count.  All steps are polled with interrupts disabled.
 *	Returns 0 on success, -1 on timeout or drive error (ata_bio->error
 *	is set accordingly).
 */
1452 static int
1453 mvsata_bio_ready(struct mvsata_port *mvport, struct ata_bio *ata_bio, int drive,
1454 int flags)
1456 struct ata_channel *chp = &mvport->port_ata_channel;
1457 struct atac_softc *atac = chp->ch_atac;
1458 struct ata_drive_datas *drvp = &chp->ch_drive[drive];
1459 const char *errstring;
/*
1462 * disable interrupts, all commands here should be quick
1463 * enough to be able to poll, and we don't go here that often
 */
1465 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
1466 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
1467 DELAY(10);
1468 errstring = "wait";
1469 if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
1470 goto ctrltimeout;
1471 wdccommandshort(chp, drive, WDCC_RECAL);
1472 /* Wait for at least 400ns for status bit to be valid */
1473 DELAY(1);
1474 errstring = "recal";
1475 if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
1476 goto ctrltimeout;
1477 if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
1478 goto ctrlerror;
1479 /* Don't try to set modes if controller can't be adjusted */
1480 if (atac->atac_set_modes == NULL)
1481 goto geometry;
1482 /* Also don't try if the drive didn't report its mode */
1483 if ((drvp->drive_flags & DRIVE_MODE) == 0)
1484 goto geometry;
1485 wdccommand(chp, drvp->drive, SET_FEATURES, 0, 0, 0,
1486 0x08 | drvp->PIO_mode, WDSF_SET_MODE);
1487 errstring = "piomode";
1488 if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
1489 goto ctrltimeout;
1490 if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
1491 goto ctrlerror;
/* Prefer UDMA over multiword DMA when the drive supports both. */
1492 if (drvp->drive_flags & DRIVE_UDMA)
1493 wdccommand(chp, drvp->drive, SET_FEATURES, 0, 0, 0,
1494 0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
1495 else if (drvp->drive_flags & DRIVE_DMA)
1496 wdccommand(chp, drvp->drive, SET_FEATURES, 0, 0, 0,
1497 0x20 | drvp->DMA_mode, WDSF_SET_MODE);
1498 else
1499 goto geometry;
1500 errstring = "dmamode";
1501 if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
1502 goto ctrltimeout;
1503 if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
1504 goto ctrlerror;
1505 geometry:
1506 if (ata_bio->flags & ATA_LBA)
1507 goto multimode;
1508 wdccommand(chp, drive, WDCC_IDP, ata_bio->lp->d_ncylinders,
1509 ata_bio->lp->d_ntracks - 1, 0, ata_bio->lp->d_nsectors,
1510 (ata_bio->lp->d_type == DTYPE_ST506) ?
1511 ata_bio->lp->d_precompcyl / 4 : 0);
1512 errstring = "geometry";
1513 if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
1514 goto ctrltimeout;
1515 if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
1516 goto ctrlerror;
1517 multimode:
1518 if (ata_bio->multi == 1)
1519 goto ready;
1520 wdccommand(chp, drive, WDCC_SETMULTI, 0, 0, 0, ata_bio->multi, 0);
1521 errstring = "setmulti";
1522 if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
1523 goto ctrltimeout;
1524 if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
1525 goto ctrlerror;
1526 ready:
1527 drvp->state = READY;
/*
1529 * The drive is usable now; re-enable interrupts.
 */
1531 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
1532 delay(10); /* some drives need a little delay here */
1533 return 0;
1535 ctrltimeout:
1536 aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s timed out\n",
1537 chp->ch_channel, drive, errstring);
1538 ata_bio->error = TIMEOUT;
1539 goto ctrldone;
1540 ctrlerror:
1541 aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s ",
1542 chp->ch_channel, drive, errstring);
1543 if (chp->ch_status & WDCS_DWF) {
1544 aprint_error("drive fault\n");
1545 ata_bio->error = ERR_DF;
1546 } else {
1547 aprint_error("error (%x)\n", chp->ch_error);
1548 ata_bio->r_error = chp->ch_error;
1549 ata_bio->error = ERROR;
1551 ctrldone:
1552 drvp->state = 0;
1553 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
1554 return -1;
/*
 * mvsata_wdc_cmd_start:
 *	Issue a raw ATA command (wdc_exec_command path) via the shadow
 *	task-file registers.  EDMA is disabled first since these commands
 *	always use programmed I/O.
 */
1557 static void
1558 mvsata_wdc_cmd_start(struct ata_channel *chp, struct ata_xfer *xfer)
1560 struct mvsata_port *mvport = (struct mvsata_port *)chp;
1561 int drive = xfer->c_drive;
1562 int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
1563 struct ata_command *ata_c = xfer->c_cmd;
1565 DPRINTFN(1, ("%s:%d: mvsata_cmd_start: drive=%d\n",
1566 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drive));
1568 /* First, EDMA disable, if enabled this channel. */
1569 if (mvport->port_edmamode != nodma)
1570 mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);
1572 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
1573 switch(wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
1574 ata_c->r_st_bmask, ata_c->timeout, wait_flags)) {
1575 case WDCWAIT_OK:
1576 break;
1577 case WDCWAIT_TOUT:
1578 ata_c->flags |= AT_TIMEOU;
1579 mvsata_wdc_cmd_done(chp, xfer);
1580 return;
1581 case WDCWAIT_THR:
1582 return;
1584 if (ata_c->flags & AT_POLL)
1585 /* polled command, disable interrupts */
1586 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
1587 wdccommand(chp, drive, ata_c->r_command, ata_c->r_cyl, ata_c->r_head,
1588 ata_c->r_sector, ata_c->r_count, ata_c->r_features);
1590 if ((ata_c->flags & AT_POLL) == 0) {
1591 chp->ch_flags |= ATACH_IRQ_WAIT; /* wait for interrupt */
1592 callout_reset(&chp->ch_callout, ata_c->timeout / 1000 * hz,
1593 wdctimeout, chp);
1594 return;
/*
1597 * Polled command. Wait for drive ready or drq. Done in intr().
1598 * Wait for at least 400ns for status bit to be valid.
 */
1600 delay(10); /* 400ns delay */
1601 mvsata_wdc_cmd_intr(chp, xfer, 0);
/*
 * mvsata_wdc_cmd_intr:
 *	Completion handler (interrupt or polled) for a raw ATA command.
 *	Handles the PIO data-in/data-out phases.  Returns 0 if the IRQ
 *	was not for us, 1 otherwise.
 */
1604 static int
1605 mvsata_wdc_cmd_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
1607 struct mvsata_port *mvport = (struct mvsata_port *)chp;
1608 struct wdc_softc *wdc = CHAN_TO_WDC(chp);
1609 struct ata_command *ata_c = xfer->c_cmd;
1610 int bcount = ata_c->bcount;
1611 char *data = ata_c->data;
1612 int wflags;
1613 int drive_flags;
1615 if (ata_c->r_command == WDCC_IDENTIFY ||
1616 ata_c->r_command == ATAPI_IDENTIFY_DEVICE)
/*
1618 * The IDENTIFY data has been designed as an array of
1619 * u_int16_t, so we can byteswap it on the fly.
1620 * Historically it's what we have always done so keeping it
1621 * here ensure binary backward compatibility.
 */
1623 drive_flags = DRIVE_NOSTREAM |
1624 chp->ch_drive[xfer->c_drive].drive_flags;
1625 else
/*
1627 * Other data structure are opaque and should be transferred
1628 * as is.
 */
1630 drive_flags = chp->ch_drive[xfer->c_drive].drive_flags;
1632 if ((ata_c->flags & (AT_WAIT | AT_POLL)) == (AT_WAIT | AT_POLL))
1633 /* both wait and poll, we can tsleep here */
1634 wflags = AT_WAIT | AT_POLL;
1635 else
1636 wflags = AT_POLL;
1638 again:
1639 DPRINTFN(1, ("%s:%d: mvsata_cmd_intr: drive=%d\n",
1640 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive));
/*
1643 * after a ATAPI_SOFT_RESET, the device will have released the bus.
1644 * Reselect again, it doesn't hurt for others commands, and the time
1645 * penalty for the extra register write is acceptable,
1646 * wdc_exec_command() isn't called often (mostly for autoconfig)
 */
1648 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
1649 if ((ata_c->flags & AT_XFDONE) != 0) {
/*
1651 * We have completed a data xfer. The drive should now be
1652 * in its initial state
 */
1654 if (wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
1655 ata_c->r_st_bmask, (irq == 0) ? ata_c->timeout : 0,
1656 wflags) == WDCWAIT_TOUT) {
1657 if (irq && (xfer->c_flags & C_TIMEOU) == 0)
1658 return 0; /* IRQ was not for us */
1659 ata_c->flags |= AT_TIMEOU;
1661 goto out;
1663 if (wdcwait(chp, ata_c->r_st_pmask, ata_c->r_st_pmask,
1664 (irq == 0) ? ata_c->timeout : 0, wflags) == WDCWAIT_TOUT) {
1665 if (irq && (xfer->c_flags & C_TIMEOU) == 0)
1666 return 0; /* IRQ was not for us */
1667 ata_c->flags |= AT_TIMEOU;
1668 goto out;
1670 if (ata_c->flags & AT_READ) {
1671 if ((chp->ch_status & WDCS_DRQ) == 0) {
1672 ata_c->flags |= AT_TIMEOU;
1673 goto out;
1675 wdc->datain_pio(chp, drive_flags, data, bcount);
1676 /* at this point the drive should be in its initial state */
1677 ata_c->flags |= AT_XFDONE;
/*
1679 * XXX checking the status register again here cause some
1680 * hardware to timeout.
 */
1682 } else if (ata_c->flags & AT_WRITE) {
1683 if ((chp->ch_status & WDCS_DRQ) == 0) {
1684 ata_c->flags |= AT_TIMEOU;
1685 goto out;
1687 wdc->dataout_pio(chp, drive_flags, data, bcount);
1688 ata_c->flags |= AT_XFDONE;
1689 if ((ata_c->flags & AT_POLL) == 0) {
1690 chp->ch_flags |= ATACH_IRQ_WAIT; /* wait for intr */
1691 callout_reset(&chp->ch_callout,
1692 mstohz(ata_c->timeout), wdctimeout, chp);
1693 return 1;
1694 } else
1695 goto again;
1697 out:
1698 mvsata_wdc_cmd_done(chp, xfer);
1699 return 1;
/*
 * mvsata_wdc_cmd_kill_xfer:
 *	Abort a queued raw ATA command, flagging it AT_GONE (drive
 *	detached) or AT_RESET (channel reset), then finish it.
 */
1702 static void
1703 mvsata_wdc_cmd_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer,
1704 int reason)
1706 struct mvsata_port *mvport = (struct mvsata_port *)chp;
1707 struct ata_command *ata_c = xfer->c_cmd;
1709 DPRINTFN(1, ("%s:%d: mvsata_cmd_kill_xfer: drive=%d\n",
1710 device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive));
1712 switch (reason) {
1713 case KILL_GONE:
1714 ata_c->flags |= AT_GONE;
1715 break;
1716 case KILL_RESET:
1717 ata_c->flags |= AT_RESET;
1718 break;
1719 default:
1720 aprint_error_dev(MVSATA_DEV2(mvport),
1721 "mvsata_cmd_kill_xfer: unknown reason %d\n", reason);
1722 panic("mvsata_cmd_kill_xfer");
1724 mvsata_wdc_cmd_done_end(chp, xfer);
/*
 * mvsata_wdc_cmd_done:
 *	Finish a raw ATA command: latch error/fault status, optionally
 *	read back the result registers (AT_READREG), stop the timeout,
 *	re-enable interrupts for polled commands and complete the xfer.
 */
1727 static void
1728 mvsata_wdc_cmd_done(struct ata_channel *chp, struct ata_xfer *xfer)
1730 struct mvsata_port *mvport = (struct mvsata_port *)chp;
1731 struct atac_softc *atac = chp->ch_atac;
1732 struct ata_command *ata_c = xfer->c_cmd;
1734 DPRINTFN(1, ("%s:%d: mvsata_cmd_done: drive=%d, flags=0x%x\n",
1735 device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
1736 ata_c->flags));
1738 if (chp->ch_status & WDCS_DWF)
1739 ata_c->flags |= AT_DF;
1740 if (chp->ch_status & WDCS_ERR) {
1741 ata_c->flags |= AT_ERROR;
1742 ata_c->r_error = chp->ch_error;
/* Read back result registers only on success and a live device. */
1744 if ((ata_c->flags & AT_READREG) != 0 &&
1745 device_is_active(atac->atac_dev) &&
1746 (ata_c->flags & (AT_ERROR | AT_DF)) == 0) {
1747 ata_c->r_head = MVSATA_WDC_READ_1(mvport, SRB_H);
1748 ata_c->r_count = MVSATA_WDC_READ_1(mvport, SRB_SC);
1749 ata_c->r_sector = MVSATA_WDC_READ_1(mvport, SRB_LBAL);
1750 ata_c->r_cyl = MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 8;
1751 ata_c->r_cyl |= MVSATA_WDC_READ_1(mvport, SRB_LBAH);
1752 ata_c->r_error = MVSATA_WDC_READ_1(mvport, SRB_FE);
1753 ata_c->r_features = ata_c->r_error;
1755 callout_stop(&chp->ch_callout);
1756 chp->ch_queue->active_xfer = NULL;
1757 if (ata_c->flags & AT_POLL) {
1758 /* enable interrupts */
1759 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
1760 delay(10); /* some drives need a little delay here */
/* A pending drain takes priority: kill instead of completing. */
1762 if (chp->ch_drive[xfer->c_drive].drive_flags & DRIVE_WAITDRAIN) {
1763 mvsata_wdc_cmd_kill_xfer(chp, xfer, KILL_GONE);
1764 chp->ch_drive[xfer->c_drive].drive_flags &= ~DRIVE_WAITDRAIN;
1765 wakeup(&chp->ch_queue->active_xfer);
1766 } else
1767 mvsata_wdc_cmd_done_end(chp, xfer);
/*
 * mvsata_wdc_cmd_done_end:
 *	Final completion for a raw ATA command: restart EDMA if it was
 *	configured, mark the command done, free the xfer, wake or call
 *	back the submitter, and restart the queue.
 */
1770 static void
1771 mvsata_wdc_cmd_done_end(struct ata_channel *chp, struct ata_xfer *xfer)
1773 struct mvsata_port *mvport = (struct mvsata_port *)chp;
1774 struct ata_command *ata_c = xfer->c_cmd;
1776 /* EDMA restart, if enabled */
1777 if (mvport->port_edmamode != nodma) {
1778 mvsata_edma_reset_qptr(mvport);
1779 mvsata_edma_enable(mvport);
1782 ata_c->flags |= AT_DONE;
1783 ata_free_xfer(chp, xfer);
1784 if (ata_c->flags & AT_WAIT)
1785 wakeup(ata_c);
1786 else if (ata_c->callback)
1787 ata_c->callback(ata_c->callback_arg);
1788 atastart(chp);
1790 return;
1793 #if NATAPIBUS > 0
/*
 * mvsata_atapi_start:
 *	Start an ATAPI (packet) command.  Brings the drive to READY if
 *	needed (PIO/DMA mode setup), issues ATAPI_PKT_CMD, then sends the
 *	command packet either immediately or from the interrupt handler
 *	depending on the device's DRQ behaviour.
 */
1794 static void
1795 mvsata_atapi_start(struct ata_channel *chp, struct ata_xfer *xfer)
1797 struct mvsata_softc *sc = (struct mvsata_softc *)chp->ch_atac;
1798 struct mvsata_port *mvport = (struct mvsata_port *)chp;
1799 struct atac_softc *atac = &sc->sc_wdcdev.sc_atac;
1800 struct scsipi_xfer *sc_xfer = xfer->c_cmd;
1801 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
1802 const int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
1803 const char *errstring;
1805 DPRINTFN(2, ("%s:%d:%d: mvsata_atapi_start: scsi flags 0x%x\n",
1806 device_xname(chp->ch_atac->atac_dev), chp->ch_channel,
1807 xfer->c_drive, sc_xfer->xs_control));
1809 if (mvport->port_edmamode != nodma)
1810 mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);
1812 if ((xfer->c_flags & C_DMA) && (drvp->n_xfers <= NXFER))
1813 drvp->n_xfers++;
1815 /* Do control operations specially. */
1816 if (__predict_false(drvp->state < READY)) {
1817 /* If it's not a polled command, we need the kernel thread */
1818 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0 && cpu_intr_p()) {
1819 chp->ch_queue->queue_freeze++;
1820 wakeup(&chp->ch_thread);
1821 return;
/*
1824 * disable interrupts, all commands here should be quick
1825 * enough to be able to poll, and we don't go here that often
 */
1827 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
1829 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
1830 /* Don't try to set mode if controller can't be adjusted */
1831 if (atac->atac_set_modes == NULL)
1832 goto ready;
1833 /* Also don't try if the drive didn't report its mode */
1834 if ((drvp->drive_flags & DRIVE_MODE) == 0)
1835 goto ready;
1836 errstring = "unbusy";
1837 if (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags))
1838 goto timeout;
1839 wdccommand(chp, drvp->drive, SET_FEATURES, 0, 0, 0,
1840 0x08 | drvp->PIO_mode, WDSF_SET_MODE);
1841 errstring = "piomode";
1842 if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags))
1843 goto timeout;
1844 if (chp->ch_status & WDCS_ERR) {
1845 if (chp->ch_error == WDCE_ABRT) {
/*
1847 * Some ATAPI drives reject PIO settings.
1848 * Fall back to PIO mode 3 since that's the
1849 * minimum for ATAPI.
 */
1851 aprint_error_dev(atac->atac_dev,
1852 "channel %d drive %d: PIO mode %d rejected,"
1853 " falling back to PIO mode 3\n",
1854 chp->ch_channel, xfer->c_drive,
1855 drvp->PIO_mode);
1856 if (drvp->PIO_mode > 3)
1857 drvp->PIO_mode = 3;
1858 } else
1859 goto error;
1861 if (drvp->drive_flags & DRIVE_UDMA)
1862 wdccommand(chp, drvp->drive, SET_FEATURES, 0, 0, 0,
1863 0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
1864 else
1865 if (drvp->drive_flags & DRIVE_DMA)
1866 wdccommand(chp, drvp->drive, SET_FEATURES, 0, 0, 0,
1867 0x20 | drvp->DMA_mode, WDSF_SET_MODE);
1868 else
1869 goto ready;
1870 errstring = "dmamode";
1871 if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags))
1872 goto timeout;
1873 if (chp->ch_status & WDCS_ERR) {
1874 if (chp->ch_error == WDCE_ABRT) {
1875 if (drvp->drive_flags & DRIVE_UDMA)
1876 goto error;
1877 else {
/*
1879 * The drive rejected our DMA setting.
1880 * Fall back to mode 1.
 */
1882 aprint_error_dev(atac->atac_dev,
1883 "channel %d drive %d:"
1884 " DMA mode %d rejected,"
1885 " falling back to DMA mode 0\n",
1886 chp->ch_channel, xfer->c_drive,
1887 drvp->DMA_mode);
1888 if (drvp->DMA_mode > 0)
1889 drvp->DMA_mode = 0;
1891 } else
1892 goto error;
1894 ready:
1895 drvp->state = READY;
1896 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
1897 delay(10); /* some drives need a little delay here */
1899 /* start timeout machinery */
1900 if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
1901 callout_reset(&chp->ch_callout, mstohz(sc_xfer->timeout),
1902 wdctimeout, chp);
1904 MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
1905 switch (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags) < 0) {
1906 case WDCWAIT_OK:
1907 break;
1908 case WDCWAIT_TOUT:
1909 aprint_error_dev(atac->atac_dev, "not ready, st = %02x\n",
1910 chp->ch_status);
1911 sc_xfer->error = XS_TIMEOUT;
1912 mvsata_atapi_reset(chp, xfer);
1913 return;
1914 case WDCWAIT_THR:
1915 return;
/*
1919 * Even with WDCS_ERR, the device should accept a command packet
1920 * Limit length to what can be stuffed into the cylinder register
1921 * (16 bits). Some CD-ROMs seem to interpret '0' as 65536,
1922 * but not all devices do that and it's not obvious from the
1923 * ATAPI spec that that behaviour should be expected. If more
1924 * data is necessary, multiple data transfer phases will be done.
 */
1927 wdccommand(chp, xfer->c_drive, ATAPI_PKT_CMD,
1928 xfer->c_bcount <= 0xffff ? xfer->c_bcount : 0xffff, 0, 0, 0,
1929 (xfer->c_flags & C_DMA) ? ATAPI_PKT_CMD_FTRE_DMA : 0);
/*
1932 * If there is no interrupt for CMD input, busy-wait for it (done in
1933 * the interrupt routine. If it is a polled command, call the interrupt
1934 * routine until command is done.
 */
1936 if ((sc_xfer->xs_periph->periph_cap & ATAPI_CFG_DRQ_MASK) !=
1937 ATAPI_CFG_IRQ_DRQ || (sc_xfer->xs_control & XS_CTL_POLL)) {
1938 /* Wait for at least 400ns for status bit to be valid */
1939 DELAY(1);
1940 mvsata_atapi_intr(chp, xfer, 0);
1941 } else
1942 chp->ch_flags |= ATACH_IRQ_WAIT;
1943 if (sc_xfer->xs_control & XS_CTL_POLL) {
1944 if (chp->ch_flags & ATACH_DMA_WAIT) {
1945 wdc_dmawait(chp, xfer, sc_xfer->timeout);
1946 chp->ch_flags &= ~ATACH_DMA_WAIT;
1948 while ((sc_xfer->xs_status & XS_STS_DONE) == 0) {
1949 /* Wait for at least 400ns for status bit to be valid */
1950 DELAY(1);
1951 mvsata_atapi_intr(chp, xfer, 0);
1954 return;
1956 timeout:
1957 aprint_error_dev(atac->atac_dev, "channel %d drive %d: %s timed out\n",
1958 chp->ch_channel, xfer->c_drive, errstring);
1959 sc_xfer->error = XS_TIMEOUT;
1960 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
1961 delay(10); /* some drives need a little delay here */
1962 mvsata_atapi_reset(chp, xfer);
1963 return;
1965 error:
1966 aprint_error_dev(atac->atac_dev,
1967 "channel %d drive %d: %s error (0x%x)\n",
1968 chp->ch_channel, xfer->c_drive, errstring, chp->ch_error);
1969 sc_xfer->error = XS_SHORTSENSE;
1970 sc_xfer->sense.atapi_sense = chp->ch_error;
1971 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
1972 delay(10); /* some drives need a little delay here */
1973 mvsata_atapi_reset(chp, xfer);
1974 return;
/*
 * ATAPI interrupt/poll handler: advances the PIO/DMA phase machine for the
 * active packet command on `chp'.  `irq' is non-zero when entered from
 * interrupt context (vs. polled).  Returns non-zero if the event was
 * handled, 0 if the IRQ was not for us.
 */
static int
mvsata_atapi_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	int len, phase, ire, error, retries=0, i;
	void *cmd;

	DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_intr\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	/* Is it not a transfer, but a control operation? */
	if (drvp->state < READY) {
		aprint_error_dev(atac->atac_dev,
		    "channel %d drive %d: bad state %d\n",
		    chp->ch_channel, xfer->c_drive, drvp->state);
		panic("mvsata_atapi_intr: bad state");
	}

	/*
	 * If we missed an interrupt in a PIO transfer, reset and restart.
	 * Don't try to continue transfer, we may have missed cycles.
	 */
	if ((xfer->c_flags & (C_TIMEOU | C_DMA)) == C_TIMEOU) {
		sc_xfer->error = XS_TIMEOUT;
		mvsata_atapi_reset(chp, xfer);
		return 1;
	}

	/* Ack interrupt done in wdc_wait_for_unbusy */
	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	if (wdc_wait_for_unbusy(chp,
	    (irq == 0) ? sc_xfer->timeout : 0, AT_POLL) == WDCWAIT_TOUT) {
		/* A spurious shared-IRQ invocation is not an error. */
		if (irq && (xfer->c_flags & C_TIMEOU) == 0)
			return 0;	/* IRQ was not for us */
		aprint_error_dev(atac->atac_dev,
		    "channel %d: device timeout, c_bcount=%d, c_skip=%d\n",
		    chp->ch_channel, xfer->c_bcount, xfer->c_skip);
		if (xfer->c_flags & C_DMA)
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
		sc_xfer->error = XS_TIMEOUT;
		mvsata_atapi_reset(chp, xfer);
		return 1;
	}

	/*
	 * If we missed an IRQ and were using DMA, flag it as a DMA error
	 * and reset device.
	 */
	if ((xfer->c_flags & C_TIMEOU) && (xfer->c_flags & C_DMA)) {
		ata_dmaerr(drvp, (xfer->c_flags & C_POLL) ? AT_POLL : 0);
		sc_xfer->error = XS_RESET;
		mvsata_atapi_reset(chp, xfer);
		return (1);
	}

	/*
	 * if the request sense command was aborted, report the short sense
	 * previously recorded, else continue normal processing
	 */

again:
	/* Phase byte count comes from the LBA-mid/high shadow registers. */
	len = MVSATA_WDC_READ_1(mvport, SRB_LBAM) +
	    256 * MVSATA_WDC_READ_1(mvport, SRB_LBAH);
	ire = MVSATA_WDC_READ_1(mvport, SRB_SC);
	phase = (ire & (WDCI_CMD | WDCI_IN)) | (chp->ch_status & WDCS_DRQ);
	DPRINTF((
	    "mvsata_atapi_intr: c_bcount %d len %d st 0x%x err 0x%x ire 0x%x :",
	    xfer->c_bcount, len, chp->ch_status, chp->ch_error, ire));

	switch (phase) {
	case PHASE_CMDOUT:
		cmd = sc_xfer->cmd;
		DPRINTF(("PHASE_CMDOUT\n"));
		/* Init the DMA channel if necessary */
		if (xfer->c_flags & C_DMA) {
			error = mvsata_bdma_init(mvport, sc_xfer,
			    (char *)xfer->c_databuf + xfer->c_skip);
			if (error) {
				if (error == EINVAL) {
					/*
					 * We can't do DMA on this transfer
					 * for some reason.  Fall back to PIO.
					 */
					xfer->c_flags &= ~C_DMA;
					error = 0;
				} else {
					sc_xfer->error = XS_DRIVER_STUFFUP;
					break;
				}
			}
		}

		/* send packet command */
		/* Commands are 12 or 16 bytes long. It's 32-bit aligned */
		wdc->dataout_pio(chp, drvp->drive_flags, cmd, sc_xfer->cmdlen);

		/* Start the DMA channel if necessary */
		if (xfer->c_flags & C_DMA) {
			mvsata_bdma_start(mvport);
			chp->ch_flags |= ATACH_DMA_WAIT;
		}
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
			chp->ch_flags |= ATACH_IRQ_WAIT;
		return 1;

	case PHASE_DATAOUT:
		/* write data */
		DPRINTF(("PHASE_DATAOUT\n"));
		/* A data-out phase is only legal for a PIO write. */
		if ((sc_xfer->xs_control & XS_CTL_DATA_OUT) == 0 ||
		    (xfer->c_flags & C_DMA) != 0) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d drive %d: bad data phase DATAOUT\n",
			    chp->ch_channel, xfer->c_drive);
			if (xfer->c_flags & C_DMA)
				ata_dmaerr(drvp,
				    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_TIMEOUT;
			mvsata_atapi_reset(chp, xfer);
			return 1;
		}
		/* c_lenoff > 0 means the device asked for more than we have. */
		xfer->c_lenoff = len - xfer->c_bcount;
		if (xfer->c_bcount < len) {
			aprint_error_dev(atac->atac_dev, "channel %d drive %d:"
			    " warning: write only %d of %d requested bytes\n",
			    chp->ch_channel, xfer->c_drive, xfer->c_bcount,
			    len);
			len = xfer->c_bcount;
		}

		wdc->dataout_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, len);

		/* Pad out the remainder the device expects with zeroes. */
		for (i = xfer->c_lenoff; i > 0; i -= 2)
			MVSATA_WDC_WRITE_2(mvport, SRB_PIOD, 0);

		xfer->c_skip += len;
		xfer->c_bcount -= len;
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
			chp->ch_flags |= ATACH_IRQ_WAIT;
		return 1;

	case PHASE_DATAIN:
		/* Read data */
		DPRINTF(("PHASE_DATAIN\n"));
		/* A data-in phase is only legal for a PIO read. */
		if ((sc_xfer->xs_control & XS_CTL_DATA_IN) == 0 ||
		    (xfer->c_flags & C_DMA) != 0) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d drive %d: bad data phase DATAIN\n",
			    chp->ch_channel, xfer->c_drive);
			if (xfer->c_flags & C_DMA)
				ata_dmaerr(drvp,
				    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_TIMEOUT;
			mvsata_atapi_reset(chp, xfer);
			return 1;
		}
		xfer->c_lenoff = len - xfer->c_bcount;
		if (xfer->c_bcount < len) {
			aprint_error_dev(atac->atac_dev, "channel %d drive %d:"
			    " warning: reading only %d of %d bytes\n",
			    chp->ch_channel, xfer->c_drive, xfer->c_bcount,
			    len);
			len = xfer->c_bcount;
		}

		wdc->datain_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, len);

		/* Drain any excess bytes the device still wants to send. */
		if (xfer->c_lenoff > 0)
			wdcbit_bucket(chp, len - xfer->c_bcount);

		xfer->c_skip += len;
		xfer->c_bcount -= len;
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
			chp->ch_flags |= ATACH_IRQ_WAIT;
		return 1;

	case PHASE_ABORTED:
	case PHASE_COMPLETED:
		DPRINTF(("PHASE_COMPLETED\n"));
		if (xfer->c_flags & C_DMA)
			xfer->c_bcount -= sc_xfer->datalen;
		sc_xfer->resid = xfer->c_bcount;
		mvsata_atapi_phase_complete(xfer);
		return 1;

	default:
		/* Unknown phase: re-read status a few times before giving up. */
		if (++retries<500) {
			DELAY(100);
			chp->ch_status = MVSATA_WDC_READ_1(mvport, SRB_CS);
			chp->ch_error = MVSATA_WDC_READ_1(mvport, SRB_FE);
			goto again;
		}
		aprint_error_dev(atac->atac_dev,
		    "channel %d drive %d: unknown phase 0x%x\n",
		    chp->ch_channel, xfer->c_drive, phase);
		if (chp->ch_status & WDCS_ERR) {
			sc_xfer->error = XS_SHORTSENSE;
			sc_xfer->sense.atapi_sense = chp->ch_error;
		} else {
			if (xfer->c_flags & C_DMA)
				ata_dmaerr(drvp,
				    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_RESET;
			mvsata_atapi_reset(chp, xfer);
			return (1);
		}
	}
	DPRINTF(("mvsata_atapi_intr: mvsata_atapi_done() (end), error 0x%x "
	    "sense 0x%x\n", sc_xfer->error, sc_xfer->sense.atapi_sense));
	mvsata_atapi_done(chp, xfer);
	return 1;
}
2195 static void
2196 mvsata_atapi_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer,
2197 int reason)
2199 struct mvsata_port *mvport = (struct mvsata_port *)chp;
2200 struct scsipi_xfer *sc_xfer = xfer->c_cmd;
2202 /* remove this command from xfer queue */
2203 switch (reason) {
2204 case KILL_GONE:
2205 sc_xfer->error = XS_DRIVER_STUFFUP;
2206 break;
2208 case KILL_RESET:
2209 sc_xfer->error = XS_RESET;
2210 break;
2212 default:
2213 aprint_error_dev(MVSATA_DEV2(mvport),
2214 "mvsata_atapi_kill_xfer: unknown reason %d\n", reason);
2215 panic("mvsata_atapi_kill_xfer");
2217 ata_free_xfer(chp, xfer);
2218 scsipi_done(sc_xfer);
2221 static void
2222 mvsata_atapi_reset(struct ata_channel *chp, struct ata_xfer *xfer)
2224 struct atac_softc *atac = chp->ch_atac;
2225 struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
2226 struct scsipi_xfer *sc_xfer = xfer->c_cmd;
2228 wdccommandshort(chp, xfer->c_drive, ATAPI_SOFT_RESET);
2229 drvp->state = 0;
2230 if (wdc_wait_for_unbusy(chp, WDC_RESET_WAIT, AT_POLL) != 0) {
2231 printf("%s:%d:%d: reset failed\n", device_xname(atac->atac_dev),
2232 chp->ch_channel, xfer->c_drive);
2233 sc_xfer->error = XS_SELTIMEOUT;
2235 mvsata_atapi_done(chp, xfer);
2236 return;
/*
 * Finish the data phase of an ATAPI transfer: optionally wait for DSC on
 * tape-like (DRIVE_ATAPIST) devices, latch short-sense/DMA errors, then
 * complete the command via mvsata_atapi_done().
 */
static void
mvsata_atapi_phase_complete(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	/* wait for DSC if needed */
	if (drvp->drive_flags & DRIVE_ATAPIST) {
		DPRINTFN(1,
		    ("%s:%d:%d: mvsata_atapi_phase_complete: polldsc %d\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, xfer->c_dscpoll));
		/* DSC polling reschedules via callout; impossible at cold boot. */
		if (cold)
			panic("mvsata_atapi_phase_complete: cold");

		if (wdcwait(chp, WDCS_DSC, WDCS_DSC, 10, AT_POLL) ==
		    WDCWAIT_TOUT) {
			/* 10ms not enough, try again in 1 tick */
			if (xfer->c_dscpoll++ > mstohz(sc_xfer->timeout)) {
				aprint_error_dev(atac->atac_dev,
				    "channel %d: wait_for_dsc failed\n",
				    chp->ch_channel);
				sc_xfer->error = XS_TIMEOUT;
				mvsata_atapi_reset(chp, xfer);
				return;
			} else
				callout_reset(&chp->ch_callout, 1,
				    mvsata_atapi_polldsc, xfer);
			return;
		}
	}

	/*
	 * Some drive occasionally set WDCS_ERR with
	 * "ATA illegal length indication" in the error
	 * register. If we read some data the sense is valid
	 * anyway, so don't report the error.
	 */
	if (chp->ch_status & WDCS_ERR &&
	    ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 ||
	    sc_xfer->resid == sc_xfer->datalen)) {
		/* save the short sense */
		sc_xfer->error = XS_SHORTSENSE;
		sc_xfer->sense.atapi_sense = chp->ch_error;
		if ((sc_xfer->xs_periph->periph_quirks & PQUIRK_NOSENSE) == 0) {
			/* ask scsipi to send a REQUEST_SENSE */
			sc_xfer->error = XS_BUSY;
			sc_xfer->status = SCSI_CHECK;
		} else
		/*
		 * NOTE(review): this DMA-status check is the `else' of the
		 * PQUIRK_NOSENSE test, i.e. it only runs for NOSENSE devices
		 * that already reported WDCS_ERR.  In atapi_wdc.c the same
		 * check is the `else' of the outer WDCS_ERR test -- confirm
		 * which structure is intended.
		 */
		if (wdc->dma_status & (WDC_DMAST_NOIRQ | WDC_DMAST_ERR)) {
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_RESET;
			mvsata_atapi_reset(chp, xfer);
			return;
		}
	}
	if (xfer->c_bcount != 0)
		DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_intr:"
		    " bcount value is %d after io\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, xfer->c_bcount));
#ifdef DIAGNOSTIC
	if (xfer->c_bcount < 0)
		aprint_error_dev(atac->atac_dev,
		    "channel %d drive %d: mvsata_atapi_intr:"
		    " warning: bcount value is %d after io\n",
		    chp->ch_channel, xfer->c_drive, xfer->c_bcount);
#endif

	DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_phase_complete:"
	    " mvsata_atapi_done(), error 0x%x sense 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    sc_xfer->error, sc_xfer->sense.atapi_sense));
	mvsata_atapi_done(chp, xfer);
}
/*
 * Complete an ATAPI transfer: stop the timeout callout, release the channel
 * and the xfer, wake any drain waiter, hand the result to scsipi and kick
 * the next queued ATA command.
 */
static void
mvsata_atapi_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	int drive = xfer->c_drive;	/* saved: xfer is freed below */

	DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_done: flags 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    (u_int)xfer->c_flags));
	callout_stop(&chp->ch_callout);
	/* mark controller inactive and free the command */
	chp->ch_queue->active_xfer = NULL;
	ata_free_xfer(chp, xfer);

	if (chp->ch_drive[drive].drive_flags & DRIVE_WAITDRAIN) {
		sc_xfer->error = XS_DRIVER_STUFFUP;
		chp->ch_drive[drive].drive_flags &= ~DRIVE_WAITDRAIN;
		wakeup(&chp->ch_queue->active_xfer);
	}

	DPRINTFN(1, ("%s:%d: mvsata_atapi_done: scsipi_done\n",
	    device_xname(atac->atac_dev), chp->ch_channel));
	scsipi_done(sc_xfer);
	DPRINTFN(1, ("%s:%d: atastart from wdc_atapi_done, flags 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, chp->ch_flags));
	atastart(chp);
}
/*
 * Callout handler used while waiting for DSC: retry the phase-completion
 * (and hence the DSC poll) for the transfer passed as `arg'.
 */
static void
mvsata_atapi_polldsc(void *arg)
{
	struct ata_xfer *xfer = arg;

	mvsata_atapi_phase_complete(xfer);
}
2354 #endif /* NATAPIBUS > 0 */
 * XXXX: Do we need locking to guard against a race in
 * mvsata_edma_inqueue{,_gen2}() if the atabus layer ever queues commands
 * concurrently?  No race can occur as long as these functions are called
 * only from the atabus thread.
/*
 * Queue the bio transfer described by `ata_bio' on the port's EDMA request
 * queue.  Returns 0 on success, EBUSY when the hardware queue or the tag
 * space is full, or the error from loading the data DMA map.
 */
static int
mvsata_edma_inqueue(struct mvsata_port *mvport, struct ata_bio *ata_bio,
		    void *databuf)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct eprd *eprd;
	bus_addr_t crqb_base_addr;
	bus_dmamap_t data_dmamap;
	uint32_t reg;
	int quetag, erqqip, erqqop, next, rv, i;

	DPRINTFN(2, ("%s:%d:%d: mvsata_edma_inqueue:"
	    " blkno=0x%llx, nbytes=%d, flags=0x%x\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, ata_bio->blkno, ata_bio->nbytes, ata_bio->flags));

	/* Read the hardware request-queue in/out pointers. */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP);
	erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP);
	erqqip = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	next = erqqip;
	MVSATA_EDMAQ_INC(next);
	if (next == erqqop)
		/* queue full */
		return EBUSY;
	if ((quetag = mvsata_quetag_get(mvport)) == -1)
		/* tag nothing */
		return EBUSY;
	DPRINTFN(2, (" erqqip=%d, quetag=%d\n", erqqip, quetag));

	rv = mvsata_dma_bufload(mvport, quetag, databuf, ata_bio->nbytes,
	    ata_bio->flags);
	if (rv != 0)
		return rv;

	mvport->port_reqtbl[quetag].xfer = chp->ch_queue->active_xfer;

	/* setup EDMA Physical Region Descriptors (ePRD) Table Data */
	data_dmamap = mvport->port_reqtbl[quetag].data_dmamap;
	eprd = mvport->port_reqtbl[quetag].eprd;
	for (i = 0; i < data_dmamap->dm_nsegs; i++) {
		bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr;
		bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len;

		eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK);
		eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len));
		eprd->eot = htole16(0);
		eprd->prdbah = htole32((ds_addr >> 16) >> 16);
		eprd++;
	}
	/* Mark the last descriptor as end-of-table. */
	(eprd - 1)->eot |= htole16(EPRD_EOT);
#ifdef MVSATA_DEBUG
	if (mvsata_debug >= 3)
		mvsata_print_eprd(mvport, quetag);
#endif
	bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap,
	    mvport->port_reqtbl[quetag].eprd_offset, MVSATA_EPRD_MAX_SIZE,
	    BUS_DMASYNC_PREWRITE);

	/* setup EDMA Command Request Block (CRQB) Data */
	sc->sc_edma_setup_crqb(mvport, erqqip, quetag, ata_bio);
#ifdef MVSATA_DEBUG
	if (mvsata_debug >= 3)
		mvsata_print_crqb(mvport, erqqip);
#endif
	bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap,
	    erqqip * sizeof(union mvsata_crqb),
	    sizeof(union mvsata_crqb), BUS_DMASYNC_PREWRITE);

	MVSATA_EDMAQ_INC(erqqip);

	/* Kick the hardware by advancing the request-queue in-pointer. */
	crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr &
	    (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP,
	    crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT));

	return 0;
}
/*
 * Harvest completed EDMA responses for `mvport'.  If `xfer1' is NULL every
 * pending response is processed and the count is returned; otherwise
 * processing stops once `xfer1' itself completes and 1 is returned.
 * Returns 0 when no response was pending.
 */
static int
mvsata_edma_handle(struct mvsata_port *mvport, struct ata_xfer *xfer1)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct crpb *crpb;
	struct ata_bio *ata_bio;
	struct ata_xfer *xfer;
	uint32_t reg;
	int erqqip, erqqop, erpqip, erpqop, prev_erpqop, quetag, handled = 0, n;

	/* First, Sync for Request Queue buffer */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP);
	erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	if (mvport->port_prev_erqqop != erqqop) {
		const int s = sizeof(union mvsata_crqb);

		/* The consumed range may wrap; sync it in up to two pieces. */
		if (mvport->port_prev_erqqop < erqqop)
			n = erqqop - mvport->port_prev_erqqop;
		else {
			if (erqqop > 0)
				bus_dmamap_sync(mvport->port_dmat,
				    mvport->port_crqb_dmamap, 0, erqqop * s,
				    BUS_DMASYNC_POSTWRITE);
			n = MVSATA_EDMAQ_LEN - mvport->port_prev_erqqop;
		}
		if (n > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crqb_dmamap,
			    mvport->port_prev_erqqop * s, n * s,
			    BUS_DMASYNC_POSTWRITE);
		mvport->port_prev_erqqop = erqqop;
	}

	/* Read the response-queue in/out pointers. */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQIP);
	erpqip = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT;
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQOP);
	erpqop = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT;

	DPRINTFN(3, ("%s:%d:%d: mvsata_edma_handle: erpqip=%d, erpqop=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, erpqip, erpqop));

	if (erpqop == erpqip)
		return 0;

	/* Sync the response slots about to be read (range may wrap). */
	if (erpqop < erpqip)
		n = erpqip - erpqop;
	else {
		if (erpqip > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crpb_dmamap,
			    0, erpqip * sizeof(struct crpb),
			    BUS_DMASYNC_POSTREAD);
		n = MVSATA_EDMAQ_LEN - erpqop;
	}
	if (n > 0)
		bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap,
		    erpqop * sizeof(struct crpb),
		    n * sizeof(struct crpb), BUS_DMASYNC_POSTREAD);

	prev_erpqop = erpqop;
	while (erpqop != erpqip) {
#ifdef MVSATA_DEBUG
		if (mvsata_debug >= 3)
			mvsata_print_crpb(mvport, erpqop);
#endif
		crpb = mvport->port_crpb + erpqop;
		quetag = CRPB_CHOSTQUETAG(le16toh(crpb->id));
		xfer = chp->ch_queue->active_xfer =
		    mvport->port_reqtbl[quetag].xfer;
#ifdef DIAGNOSTIC
		if (xfer == NULL)
			panic("unknwon response received: %s:%d:%d: tag 0x%x\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port, quetag);
#endif

		bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap,
		    mvport->port_reqtbl[quetag].eprd_offset,
		    MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE);

		/* Translate the CRPB status flags into ata_bio error codes. */
		chp->ch_status = CRPB_CDEVSTS(le16toh(crpb->rspflg));
		chp->ch_error = CRPB_CEDMASTS(le16toh(crpb->rspflg));
		ata_bio = xfer->c_cmd;
		ata_bio->error = NOERROR;
		ata_bio->r_error = 0;
		if (chp->ch_status & WDCS_ERR)
			ata_bio->error = ERROR;
		if (chp->ch_status & WDCS_BSY)
			ata_bio->error = TIMEOUT;
		if (chp->ch_error)
			ata_bio->error = ERR_DMA;

		mvsata_dma_bufunload(mvport, quetag, ata_bio->flags);
		mvport->port_reqtbl[quetag].xfer = NULL;
		mvsata_quetag_put(mvport, quetag);
		MVSATA_EDMAQ_INC(erpqop);

#if 1	/* XXXX: flags clears here, because necessary the atabus layer. */
		erqqip = (MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP) &
		    EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
		if (erpqop == erqqip)
			chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_IRQ_WAIT);
#endif
		mvsata_bio_intr(chp, xfer, 1);
		if (xfer1 == NULL)
			handled++;
		else if (xfer == xfer1) {
			handled = 1;
			break;
		}
	}

	/* Hand the consumed response slots back to the device (may wrap). */
	if (prev_erpqop < erpqop)
		n = erpqop - prev_erpqop;
	else {
		if (erpqop > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crpb_dmamap, 0,
			    erpqop * sizeof(struct crpb), BUS_DMASYNC_PREREAD);
		n = MVSATA_EDMAQ_LEN - prev_erpqop;
	}
	if (n > 0)
		bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap,
		    prev_erpqop * sizeof(struct crpb),
		    n * sizeof(struct crpb), BUS_DMASYNC_PREREAD);

	/* Advance the hardware response-queue out-pointer. */
	reg &= ~EDMA_RESQP_ERPQP_MASK;
	reg |= (erpqop << EDMA_RESQP_ERPQP_SHIFT);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, reg);

#if 0	/* already cleared ago? */
	erqqip = (MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP) &
	    EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	if (erpqop == erqqip)
		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_IRQ_WAIT);
#endif

	return handled;
}
/*
 * Poll for completion of `xfer' for up to `timeout' ms in 10ms steps.
 * Returns 0 when the transfer completed, 1 on timeout (in which case the
 * request is pulled off the EDMA queue and flagged C_TIMEOU).
 */
static int
mvsata_edma_wait(struct mvsata_port *mvport, struct ata_xfer *xfer, int timeout)
{
	struct ata_bio *ata_bio = xfer->c_cmd;
	int xtime;

	for (xtime = 0; xtime < timeout / 10; xtime++) {
		if (mvsata_edma_handle(mvport, xfer))
			return 0;
		if (ata_bio->flags & ATA_NOSLEEP)
			delay(10000);
		else
			/*
			 * NOTE(review): this sleeps on the address of the
			 * local variable `xfer', which nothing ever wakes,
			 * so it behaves as a plain 10ms sleep -- confirm
			 * whether `xfer' (not `&xfer') was intended as the
			 * wait channel.
			 */
			tsleep(&xfer, PRIBIO, "mvsataipl", mstohz(10));
	}

	DPRINTF(("mvsata_edma_wait: timeout: %p\n", xfer));
	mvsata_edma_rqq_remove(mvport, xfer);
	xfer->c_flags |= C_TIMEOU;
	return 1;
}
2604 static void
2605 mvsata_edma_timeout(void *arg)
2607 struct ata_xfer *xfer = (struct ata_xfer *)arg;
2608 struct ata_channel *chp = xfer->c_chp;
2609 struct mvsata_port *mvport = (struct mvsata_port *)chp;
2610 int s;
2612 s = splbio();
2613 DPRINTF(("mvsata_edma_timeout: %p\n", xfer));
2614 if ((chp->ch_flags & ATACH_IRQ_WAIT) != 0) {
2615 mvsata_edma_rqq_remove(mvport, xfer);
2616 xfer->c_flags |= C_TIMEOU;
2617 mvsata_bio_intr(chp, xfer, 1);
2619 splx(s);
/*
 * Remove `xfer' from the EDMA request queue: hard-reset the port (stopping
 * EDMA), drain already-completed responses, unload the victim's data map,
 * then rebuild the request queue from every other outstanding request and
 * re-enable EDMA.
 */
static void
mvsata_edma_rqq_remove(struct mvsata_port *mvport, struct ata_xfer *xfer)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct ata_bio *ata_bio;
	bus_addr_t crqb_base_addr;
	int erqqip, i;

	/* First, hardware reset, stop EDMA */
	mvsata_hreset_port(mvport);

	/* cleanup completed EDMA safely */
	mvsata_edma_handle(mvport, NULL);

	bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0,
	    sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN, BUS_DMASYNC_PREWRITE);
	for (i = 0, erqqip = 0; i < MVSATA_EDMAQ_LEN; i++) {
		if (mvport->port_reqtbl[i].xfer == NULL)
			continue;

		ata_bio = mvport->port_reqtbl[i].xfer->c_cmd;
		if (mvport->port_reqtbl[i].xfer == xfer) {
			/* remove xfer from EDMA request queue */
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_eprd_dmamap,
			    mvport->port_reqtbl[i].eprd_offset,
			    MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE);
			mvsata_dma_bufunload(mvport, i, ata_bio->flags);
			mvport->port_reqtbl[i].xfer = NULL;
			mvsata_quetag_put(mvport, i);
			continue;
		}

		/* Re-issue every other still-pending request. */
		sc->sc_edma_setup_crqb(mvport, erqqip, i, ata_bio);
		erqqip++;
	}
	bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0,
	    sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN,
	    BUS_DMASYNC_POSTWRITE);

	mvsata_edma_config(mvport, mvport->port_edmamode);
	mvsata_edma_reset_qptr(mvport);
	mvsata_edma_enable(mvport);

	/* Re-program the CRQB base and the new in-pointer. */
	crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr &
	    (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP,
	    crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT));
}
2673 #if NATAPIBUS > 0
2674 static int
2675 mvsata_bdma_init(struct mvsata_port *mvport, struct scsipi_xfer *sc_xfer,
2676 void *databuf)
2678 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
2679 struct eprd *eprd;
2680 bus_dmamap_t data_dmamap;
2681 bus_addr_t eprd_addr;
2682 int quetag, rv;
2684 DPRINTFN(2,
2685 ("%s:%d:%d: mvsata_bdma_init: datalen=%d, xs_control=0x%x\n",
2686 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
2687 mvport->port, sc_xfer->datalen, sc_xfer->xs_control));
2689 if ((quetag = mvsata_quetag_get(mvport)) == -1)
2690 /* tag nothing */
2691 return EBUSY;
2692 DPRINTFN(2, (" quetag=%d\n", quetag));
2694 rv = mvsata_dma_bufload(mvport, quetag, databuf, sc_xfer->datalen,
2695 sc_xfer->xs_control & XS_CTL_DATA_IN ? ATA_READ : 0);
2696 if (rv != 0)
2697 return rv;
2699 mvport->port_reqtbl[quetag].xfer = chp->ch_queue->active_xfer;
2701 /* setup EDMA Physical Region Descriptors (ePRD) Table Data */
2702 data_dmamap = mvport->port_reqtbl[quetag].data_dmamap;
2703 eprd = mvport->port_reqtbl[quetag].eprd;
2704 for (i = 0; i < data_dmamap->dm_nsegs; i++) {
2705 bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr;
2706 bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len;
2708 eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK);
2709 eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len));
2710 eprd->eot = htole16(0);
2711 eprd->prdbah = htole32((ds_addr >> 16) >> 16);
2712 eprd++;
2714 (eprd - 1)->eot |= htole16(EPRD_EOT);
2715 #ifdef MVSATA_DEBUG
2716 if (mvsata_debug >= 3)
2717 mvsata_print_eprd(mvport, quetag);
2718 #endif
2719 bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap,
2720 mvport->port_reqtbl[quetag].eprd_offset, MVSATA_EPRD_MAX_SIZE,
2721 BUS_DMASYNC_PREWRITE);
2722 eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr +
2723 mvport->port_reqtbl[quetag].eprd_offset;
2725 MVSATA_EDMA_WRITE_4(mvport, DMA_DTLBA, eprd_addr & DMA_DTLBA_MASK);
2726 MVSATA_EDMA_WRITE_4(mvport, DMA_DTHBA, (eprd_addr >> 16) >> 16);
2728 if (sc_xfer->xs_control & XS_CTL_DATA_IN)
2729 MVSATA_EDMA_WRITE_4(mvport, DMA_C, DMA_C_READ);
2730 else
2731 MVSATA_EDMA_WRITE_4(mvport, DMA_C, 0);
2733 return 0;
2736 static void
2737 mvsata_bdma_start(struct mvsata_port *mvport)
2740 #ifdef MVSATA_DEBUG
2741 if (mvsata_debug >= 3)
2742 mvsata_print_eprd(mvport, 0);
2743 #endif
2745 MVSATA_EDMA_WRITE_4(mvport, DMA_C,
2746 MVSATA_EDMA_READ_4(mvport, DMA_C) | DMA_C_START);
2748 #endif
2749 #endif
2752 static int
2753 mvsata_port_init(struct mvsata_hc *mvhc, int port)
2755 struct mvsata_softc *sc = mvhc->hc_sc;
2756 struct mvsata_port *mvport;
2757 struct ata_channel *chp;
2758 int channel, rv, i;
2759 const int crqbq_size = sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN;
2760 const int crpbq_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN;
2761 const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN;
2763 mvport = malloc(sizeof(struct mvsata_port), M_DEVBUF,
2764 M_ZERO | M_NOWAIT);
2765 if (mvport == NULL) {
2766 aprint_error("%s:%d: can't allocate memory for port %d\n",
2767 device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2768 return ENOMEM;
2771 mvport->port = port;
2772 mvport->port_hc = mvhc;
2773 mvport->port_edmamode = nodma;
2775 rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh,
2776 EDMA_REGISTERS_OFFSET + port * EDMA_REGISTERS_SIZE,
2777 EDMA_REGISTERS_SIZE, &mvport->port_ioh);
2778 if (rv != 0) {
2779 aprint_error("%s:%d: can't subregion EDMA %d registers\n",
2780 device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2781 goto fail0;
2783 mvport->port_iot = mvhc->hc_iot;
2784 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SS, 4,
2785 &mvport->port_sata_sstatus);
2786 if (rv != 0) {
2787 aprint_error("%s:%d:%d: couldn't subregion sstatus regs\n",
2788 device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2789 goto fail0;
2791 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SE, 4,
2792 &mvport->port_sata_serror);
2793 if (rv != 0) {
2794 aprint_error("%s:%d:%d: couldn't subregion serror regs\n",
2795 device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2796 goto fail0;
2798 if (sc->sc_rev == gen1)
2799 rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh,
2800 SATAHC_I_R02(port), 4, &mvport->port_sata_scontrol);
2801 else
2802 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2803 SATA_SC, 4, &mvport->port_sata_scontrol);
2804 if (rv != 0) {
2805 aprint_error("%s:%d:%d: couldn't subregion scontrol regs\n",
2806 device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2807 goto fail0;
2809 mvport->port_dmat = sc->sc_dmat;
2810 #ifndef MVSATA_WITHOUTDMA
2811 mvsata_quetag_init(mvport);
2812 #endif
2813 mvhc->hc_ports[port] = mvport;
2815 channel = mvhc->hc * sc->sc_port + port;
2816 chp = &mvport->port_ata_channel;
2817 chp->ch_channel = channel;
2818 chp->ch_atac = &sc->sc_wdcdev.sc_atac;
2819 chp->ch_ndrive = 1; /* SATA is always 1 drive */
2820 chp->ch_queue = &mvport->port_ata_queue;
2821 sc->sc_ata_channels[channel] = chp;
2823 rv = mvsata_wdc_reg_init(mvport, sc->sc_wdcdev.regs + channel);
2824 if (rv != 0)
2825 goto fail0;
2827 rv = bus_dmamap_create(mvport->port_dmat, crqbq_size, 1, crqbq_size, 0,
2828 BUS_DMA_NOWAIT, &mvport->port_crqb_dmamap);
2829 if (rv != 0) {
2830 aprint_error(
2831 "%s:%d:%d: EDMA CRQB map create failed: error=%d\n",
2832 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv);
2833 goto fail0;
2835 rv = bus_dmamap_create(mvport->port_dmat, crpbq_size, 1, crpbq_size, 0,
2836 BUS_DMA_NOWAIT, &mvport->port_crpb_dmamap);
2837 if (rv != 0) {
2838 aprint_error(
2839 "%s:%d:%d: EDMA CRPB map create failed: error=%d\n",
2840 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv);
2841 goto fail1;
2843 rv = bus_dmamap_create(mvport->port_dmat, eprd_buf_size, 1,
2844 eprd_buf_size, 0, BUS_DMA_NOWAIT, &mvport->port_eprd_dmamap);
2845 if (rv != 0) {
2846 aprint_error(
2847 "%s:%d:%d: EDMA ePRD buffer map create failed: error=%d\n",
2848 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv);
2849 goto fail2;
2851 for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
2852 rv = bus_dmamap_create(mvport->port_dmat, MAXPHYS,
2853 MAXPHYS / PAGE_SIZE, MAXPHYS, 0, BUS_DMA_NOWAIT,
2854 &mvport->port_reqtbl[i].data_dmamap);
2855 if (rv != 0) {
2856 aprint_error("%s:%d:%d:"
2857 " EDMA data map(%d) create failed: error=%d\n",
2858 device_xname(MVSATA_DEV(sc)), mvhc->hc, port, i,
2859 rv);
2860 goto fail3;
2864 return 0;
2866 fail3:
2867 for (i--; i >= 0; i--)
2868 bus_dmamap_destroy(mvport->port_dmat,
2869 mvport->port_reqtbl[i].data_dmamap);
2870 bus_dmamap_destroy(mvport->port_dmat, mvport->port_eprd_dmamap);
2871 fail2:
2872 bus_dmamap_destroy(mvport->port_dmat, mvport->port_crpb_dmamap);
2873 fail1:
2874 bus_dmamap_destroy(mvport->port_dmat, mvport->port_crqb_dmamap);
2875 fail0:
2876 return rv;
2879 static int
2880 mvsata_wdc_reg_init(struct mvsata_port *mvport, struct wdc_regs *wdr)
2882 int hc, port, rv, i;
2884 hc = mvport->port_hc->hc;
2885 port = mvport->port;
2887 /* Create subregion for Shadow Registers Map */
2888 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2889 SHADOW_REG_BLOCK_OFFSET, SHADOW_REG_BLOCK_SIZE, &wdr->cmd_baseioh);
2890 if (rv != 0) {
2891 aprint_error("%s:%d:%d: couldn't subregion shadow block regs\n",
2892 device_xname(MVSATA_DEV2(mvport)), hc, port);
2893 return rv;
2895 wdr->cmd_iot = mvport->port_iot;
2897 /* Once create subregion for each command registers */
2898 for (i = 0; i < WDC_NREG; i++) {
2899 rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
2900 i * 4, sizeof(uint32_t), &wdr->cmd_iohs[i]);
2901 if (rv != 0) {
2902 aprint_error("%s:%d:%d: couldn't subregion cmd regs\n",
2903 device_xname(MVSATA_DEV2(mvport)), hc, port);
2904 return rv;
2907 /* Create subregion for Alternate Status register */
2908 rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
2909 i * 4, sizeof(uint32_t), &wdr->ctl_ioh);
2910 if (rv != 0) {
2911 aprint_error("%s:%d:%d: couldn't subregion cmd regs\n",
2912 device_xname(MVSATA_DEV2(mvport)), hc, port);
2913 return rv;
2915 wdr->ctl_iot = mvport->port_iot;
2917 wdc_init_shadow_regs(&mvport->port_ata_channel);
2919 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2920 SATA_SS, sizeof(uint32_t) * 3, &wdr->sata_baseioh);
2921 if (rv != 0) {
2922 aprint_error("%s:%d:%d: couldn't subregion SATA regs\n",
2923 device_xname(MVSATA_DEV2(mvport)), hc, port);
2924 return rv;
2926 wdr->sata_iot = mvport->port_iot;
2927 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2928 SATA_SC, sizeof(uint32_t), &wdr->sata_control);
2929 if (rv != 0) {
2930 aprint_error("%s:%d:%d: couldn't subregion SControl\n",
2931 device_xname(MVSATA_DEV2(mvport)), hc, port);
2932 return rv;
2934 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2935 SATA_SS, sizeof(uint32_t), &wdr->sata_status);
2936 if (rv != 0) {
2937 aprint_error("%s:%d:%d: couldn't subregion SStatus\n",
2938 device_xname(MVSATA_DEV2(mvport)), hc, port);
2939 return rv;
2941 rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2942 SATA_SE, sizeof(uint32_t), &wdr->sata_error);
2943 if (rv != 0) {
2944 aprint_error("%s:%d:%d: couldn't subregion SError\n",
2945 device_xname(MVSATA_DEV2(mvport)), hc, port);
2946 return rv;
2949 return 0;
2953 #ifndef MVSATA_WITHOUTDMA
 * Functions for allocating host queue tags.
 * XXXX: Tags are handed out in rotation to make debugging easier.
/* Reset the rotating tag-allocation cursor to slot 0. */
static inline void
mvsata_quetag_init(struct mvsata_port *mvport)
{

	mvport->port_quetagidx = 0;
}
2966 static inline int
2967 mvsata_quetag_get(struct mvsata_port *mvport)
2969 int begin = mvport->port_quetagidx;
2971 do {
2972 if (mvport->port_reqtbl[mvport->port_quetagidx].xfer == NULL) {
2973 MVSATA_EDMAQ_INC(mvport->port_quetagidx);
2974 return mvport->port_quetagidx;
2976 MVSATA_EDMAQ_INC(mvport->port_quetagidx);
2977 } while (mvport->port_quetagidx != begin);
2979 return -1;
/*
 * Release a host queue tag.  Freeing is implicit -- a slot is considered
 * free whenever port_reqtbl[quetag].xfer is NULL, which the caller clears
 * -- so there is nothing to do here.
 */
static inline void
mvsata_quetag_put(struct mvsata_port *mvport, int quetag)
{

	/* nothing */
}
/*
 * Allocate, map and load `size' bytes of wired DMA memory through the
 * caller-created map `*dmamap' (assumed already created -- confirm with
 * callers).  `write' selects the load direction; read-only (device-to-host)
 * memory is additionally pre-synced for reading.  Returns the kernel VA of
 * the memory, or NULL on failure with all intermediate resources released.
 */
static void *
mvsata_edma_resource_prepare(struct mvsata_port *mvport, bus_dma_tag_t dmat,
			     bus_dmamap_t *dmamap, size_t size, int write)
{
	bus_dma_segment_t seg;
	int nseg, rv;
	void *kva;

	rv = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &seg, 1, &nseg,
	    BUS_DMA_NOWAIT);
	if (rv != 0) {
		aprint_error("%s:%d:%d: DMA memory alloc failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port, rv);
		goto fail;
	}

	rv = bus_dmamem_map(dmat, &seg, nseg, size, &kva, BUS_DMA_NOWAIT);
	if (rv != 0) {
		aprint_error("%s:%d:%d: DMA memory map failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port, rv);
		goto free;
	}

	rv = bus_dmamap_load(dmat, *dmamap, kva, size, NULL,
	    BUS_DMA_NOWAIT | (write ? BUS_DMA_WRITE : BUS_DMA_READ));
	if (rv != 0) {
		aprint_error("%s:%d:%d: DMA map load failed: error=%d\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port, rv);
		goto unmap;
	}

	if (!write)
		bus_dmamap_sync(dmat, *dmamap, 0, size, BUS_DMASYNC_PREREAD);

	return kva;

	/* goto-based unwinding in reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(dmat, kva, size);
free:
	bus_dmamem_free(dmat, &seg, nseg);
fail:
	return NULL;
}
3036 /* ARGSUSED */
3037 static void
3038 mvsata_edma_resource_purge(struct mvsata_port *mvport, bus_dma_tag_t dmat,
3039 bus_dmamap_t dmamap, void *kva)
3042 bus_dmamap_unload(dmat, dmamap);
3043 bus_dmamem_unmap(dmat, kva, dmamap->dm_mapsize);
3044 bus_dmamem_free(dmat, dmamap->dm_segs, dmamap->dm_nsegs);
3047 static int
3048 mvsata_dma_bufload(struct mvsata_port *mvport, int index, void *databuf,
3049 size_t datalen, int flags)
3051 int rv, lop, sop;
3052 bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap;
3054 lop = (flags & ATA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE;
3055 sop = (flags & ATA_READ) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
3057 rv = bus_dmamap_load(mvport->port_dmat, data_dmamap, databuf, datalen,
3058 NULL, BUS_DMA_NOWAIT | lop);
3059 if (rv) {
3060 aprint_error("%s:%d:%d: buffer load failed: error=%d",
3061 device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
3062 mvport->port, rv);
3063 return rv;
3065 bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0,
3066 data_dmamap->dm_mapsize, sop);
3068 return 0;
3071 static inline void
3072 mvsata_dma_bufunload(struct mvsata_port *mvport, int index, int flags)
3074 bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap;
3076 bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0,
3077 data_dmamap->dm_mapsize,
3078 (flags & ATA_READ) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3079 bus_dmamap_unload(mvport->port_dmat, data_dmamap);
3081 #endif
/*
 * Hard-reset one SATA port: pulse the ATA reset bit in EDMA_CMD, then
 * re-apply the generation-specific PHY fixups.
 */
static void
mvsata_hreset_port(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EATARST);

	delay(25);		/* allow reset propagation */

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0);

	/* Generation-specific PHY fixup hook (gen1/gen2 variants). */
	mvport->_fix_phy_param._fix_phy(mvport);

	/* GenI needs extra settle time after reset. */
	if (sc->sc_gen == gen1)
		delay(1000);
}
/*
 * Full port reset: disable EDMA, hard-reset the port, then reprogram the
 * EDMA configuration and clear all queue pointers, interrupt cause/mask
 * registers and the FIS interrupt cause.
 */
static void
mvsata_reset_port(struct mvsata_port *mvport)
{
	device_t parent = device_parent(MVSATA_DEV2(mvport));

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA);

	mvsata_hreset_port(mvport);

	/* Burst-size configuration differs between PCI and SoC variants. */
	if (device_is_a(parent, "pci"))
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG,
		    EDMA_CFG_RESERVED | EDMA_CFG_ERDBSZ);
	else		/* SoC */
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG,
		    EDMA_CFG_RESERVED | EDMA_CFG_RESERVED2);

	/* Clear timers, interrupt state and all request/response queues. */
	MVSATA_EDMA_WRITE_4(mvport, EDMA_T, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_TC, 0);
	/* 0xbc: I/O ready timeout -- hardware default; TODO confirm. */
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IORT, 0xbc);

	MVSATA_EDMA_WRITE_4(mvport, SATA_FISIC, 0);
}
/*
 * Reset one host controller: clear the interrupt coalescing thresholds
 * and any pending host-controller interrupts.
 */
static void
mvsata_reset_hc(struct mvsata_hc *mvhc)
{
#if 0
	uint32_t val;
#endif

	MVSATA_HC_WRITE_4(mvhc, SATAHC_ICT, 0);
	MVSATA_HC_WRITE_4(mvhc, SATAHC_ITT, 0);
	MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, 0);

#if 0	/* XXXX needs? */
	MVSATA_HC_WRITE_4(mvhc, 0x01c, 0);

	/*
	 * Keep the SS during power on and the reference clock bits (reset
	 * sample)
	 */
	val = MVSATA_HC_READ_4(mvhc, 0x020);
	val &= 0x1c1c1c1c;
	val |= 0x03030303;
	/*
	 * NOTE(review): READ_4 with a value argument looks like it was
	 * meant to be MVSATA_HC_WRITE_4(mvhc, 0x020, val) -- confirm
	 * before ever enabling this block.
	 */
	MVSATA_HC_READ_4(mvhc, 0x020, 0);
#endif
}
3156 #ifndef MVSATA_WITHOUTDMA
3157 static void
3158 mvsata_softreset(struct mvsata_port *mvport, int waitok)
3160 uint32_t stat;
3161 int i;
3163 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_RST | WDCTL_IDS);
3164 delay(10);
3165 MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_IDS);
3166 delay(2000);
3168 if (waitok) {
3169 /* wait maximum 31sec */
3170 for (i = 31000; i > 0; i--) {
3171 stat = MVSATA_WDC_READ_1(mvport, SRB_CS);
3172 if (!(stat & WDCS_BSY))
3173 break;
3174 delay(1000);
3176 if (i == 0)
3177 aprint_error("%s:%d:%d: soft reset failed\n",
3178 device_xname(MVSATA_DEV2(mvport)),
3179 mvport->port_hc->hc, mvport->port);
/*
 * Reset the EDMA request/response queue pointers.  The request queue
 * pointers are zeroed; the response queue out-pointer is loaded with the
 * (masked) DMA address of the CRPB ring, its high half going into the
 * base-address-high register.
 */
static void
mvsata_edma_reset_qptr(struct mvsata_port *mvport)
{
	const bus_addr_t crpb_addr =
	    mvport->port_crpb_dmamap->dm_segs[0].ds_addr;
	const uint32_t crpb_addr_mask =
	    EDMA_RESQP_ERPQBAP_MASK | EDMA_RESQP_ERPQBA_MASK;

	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0);
	/* ">> 16 >> 16" rather than ">> 32": bus_addr_t may be 32-bit. */
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, (crpb_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, (crpb_addr & crpb_addr_mask));
}
/* Start the EDMA engine on this port. */
static inline void
mvsata_edma_enable(struct mvsata_port *mvport)
{

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EENEDMA);
}
/*
 * Stop the EDMA engine on this port.
 *
 * timeout: total wait budget in milliseconds, shared across both the
 *          idle-wait and the disable-wait phases (ms carries over).
 * waitok:  non-zero to tsleep() in 1ms slices instead of busy-waiting
 *          with delay() -- only valid in process context.
 *
 * Returns 0 on success, EBUSY if EDMA did not stop within the timeout.
 */
static int
mvsata_edma_disable(struct mvsata_port *mvport, int timeout, int waitok)
{
	uint32_t status, command;
	int ms;

	if (MVSATA_EDMA_READ_4(mvport, EDMA_CMD) & EDMA_CMD_EENEDMA) {
		/* Phase 1: wait for the engine to go idle. */
		for (ms = 0; ms < timeout; ms++) {
			status = MVSATA_EDMA_READ_4(mvport, EDMA_S);
			if (status & EDMA_S_EDMAIDLE)
				break;
			if (waitok)
				tsleep(&waitok, PRIBIO, "mvsata_edma1",
				    mstohz(1));
			else
				delay(1000);
		}
		if (ms == timeout)
			return EBUSY;

		/* The disable bit (eDsEDMA) is self negated. */
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA);

		/* Phase 2: wait for the enable bit to drop. */
		for ( ; ms < timeout; ms++) {
			command = MVSATA_EDMA_READ_4(mvport, EDMA_CMD);
			if (!(command & EDMA_CMD_EENEDMA))
				break;
			if (waitok)
				tsleep(&waitok, PRIBIO, "mvsata_edma2",
				    mstohz(1));
			else
				delay(1000);
		}
		if (ms == timeout) {
			aprint_error("%s:%d:%d: unable to stop EDMA\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port);
			return EBUSY;
		}
	}
	return 0;
}
/*
 * Set EDMA registers according to mode.
 * ex. NCQ/TCQ(queued)/non queued.
 *
 * Programs EDMA_CFG for the requested queuing mode and controller
 * generation, then rebuilds the interrupt error mask (EDMA_IEM) and the
 * halt-condition register (EDMA_HC).  Finally records the mode in
 * port_edmamode.
 */
static void
mvsata_edma_config(struct mvsata_port *mvport, int mode)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	uint32_t reg;

	reg = MVSATA_EDMA_READ_4(mvport, EDMA_CFG);
	reg |= EDMA_CFG_RESERVED;

	/* Queuing mode: NCQ (gen2+ only), legacy TCQ, or non-queued. */
	if (mode == ncq) {
		if (sc->sc_gen == gen1) {
			aprint_error_dev(MVSATA_DEV2(mvport),
			    "GenI not support NCQ\n");
			return;
		} else if (sc->sc_gen == gen2)
			reg |= EDMA_CFG_EDEVERR;
		reg |= EDMA_CFG_ESATANATVCMDQUE;
	} else if (mode == queued) {
		reg &= ~EDMA_CFG_ESATANATVCMDQUE;
		reg |= EDMA_CFG_EQUE;
	} else
		reg &= ~(EDMA_CFG_ESATANATVCMDQUE | EDMA_CFG_EQUE);

	/* Generation-specific burst/buffer configuration. */
	if (sc->sc_gen == gen1)
		reg |= EDMA_CFG_ERDBSZ;
	else if (sc->sc_gen == gen2)
		reg |= (EDMA_CFG_ERDBSZEXT | EDMA_CFG_EWRBUFFERLEN);
	else if (sc->sc_gen == gen2e) {
		device_t parent = device_parent(MVSATA_DEV(sc));

		reg |= (EDMA_CFG_EMASKRXPM | EDMA_CFG_EHOSTQUEUECACHEEN);
		reg &= ~(EDMA_CFG_EEDMAFBS | EDMA_CFG_EEDMAQUELEN);

		if (device_is_a(parent, "pci"))
			reg |= (
#if NATAPIBUS > 0
			    EDMA_CFG_EEARLYCOMPLETIONEN |
#endif
			    EDMA_CFG_ECUTTHROUGHEN |
			    EDMA_CFG_EWRBUFFERLEN |
			    EDMA_CFG_ERDBSZEXT);
	}
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG, reg);

	/* Build the interrupt error mask; gen2+ adds link-layer errors. */
	reg = (
	    EDMA_IE_EIORDYERR |
	    EDMA_IE_ETRANSINT |
	    EDMA_IE_EDEVCON |
	    EDMA_IE_EDEVDIS);
	if (sc->sc_gen != gen1)
		reg |= (
		    EDMA_IE_TRANSPROTERR |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKTXERR_FISTXABORTED) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_ESELFDIS);

	/* In NCQ mode device errors are reported via the error mask... */
	if (mode == ncq)
		reg |= EDMA_IE_EDEVERR;
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, reg);

	/* ...otherwise they halt the EDMA engine (EDMA_HC). */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_HC);
	reg &= ~EDMA_IE_EDEVERR;
	if (mode != ncq)
		reg |= EDMA_IE_EDEVERR;
	MVSATA_EDMA_WRITE_4(mvport, EDMA_HC, reg);

	if (sc->sc_gen == gen2e) {
		/*
		 * Clear FISWait4HostRdyEn[0] and [2].
		 *   [0]: Device to Host FIS with <ERR> or <DF> bit set to 1.
		 *   [2]: SDB FIS is received with <ERR> bit set to 1.
		 */
		reg = MVSATA_EDMA_READ_4(mvport, SATA_FISC);
		reg &= ~(SATA_FISC_FISWAIT4HOSTRDYEN_B0 |
		    SATA_FISC_FISWAIT4HOSTRDYEN_B2);
		MVSATA_EDMA_WRITE_4(mvport, SATA_FISC, reg);
	}

	mvport->port_edmamode = mode;
}
3348 * Generation dependent functions
/*
 * Build a CRQB (Command Request Queue Block) for a block I/O transfer
 * (GenI/GenII layout: shadow-register write sequence in atacommand[]).
 *
 * erqqip: request-queue in-pointer slot to fill.
 * quetag: host queue tag of this request (indexes port_reqtbl).
 */
static void
mvsata_edma_setup_crqb(struct mvsata_port *mvport, int erqqip, int quetag,
		       struct ata_bio *ata_bio)
{
	struct crqb *crqb;
	bus_addr_t eprd_addr;
	daddr_t blkno;
	uint32_t rw;
	uint8_t cmd, head;
	int i;
	const int drive =
	    mvport->port_ata_channel.ch_queue->active_xfer->c_drive;

	/* DMA address of this request's ePRD (scatter/gather) table. */
	eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr +
	    mvport->port_reqtbl[quetag].eprd_offset;
	rw = (ata_bio->flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE;
	cmd = (ata_bio->flags & ATA_READ) ? WDCC_READDMA : WDCC_WRITEDMA;
	head = WDSD_LBA;
	blkno = ata_bio->blkno;
	if (ata_bio->flags & ATA_LBA48)
		cmd = atacmd_to48(cmd);
	else {
		/* LBA28: top 4 bits of the block number go in the head reg. */
		head |= ((ata_bio->blkno >> 24) & 0xf);
		blkno &= 0xffffff;
	}
	crqb = &mvport->port_crqb->crqb + erqqip;
	crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK);
	/* ">> 16 >> 16" rather than ">> 32": bus_addr_t may be 32-bit. */
	crqb->cprdbh = htole32((eprd_addr >> 16) >> 16);
	crqb->ctrlflg =
	    htole16(rw | CRQB_CHOSTQUETAG(quetag) | CRQB_CPMPORT(drive));

	/* Shadow-register writes, in ATA programming order. */
	i = 0;
	if (mvport->port_edmamode == dma) {
		if (ata_bio->flags & ATA_LBA48)
			crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
			    CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks >> 8));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks));
	} else { /* ncq/queued */

		/*
		 * XXXX: Oops, ata command is not correct. And, atabus layer
		 * has not been supported yet now.
		 *        Queued DMA read/write.
		 *        read/write FPDMAQueued.
		 */

		/* Queued commands carry the count in FEATURES, tag in SC. */
		if (ata_bio->flags & ATA_LBA48)
			crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
			    CRQB_ATACOMMAND_FEATURES, ata_bio->nblks >> 8));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_FEATURES, ata_bio->nblks));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_SECTORCOUNT, quetag << 3));
	}
	if (ata_bio->flags & ATA_LBA48) {
		/* LBA48: high-order bytes are written first (prev regs). */
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBALOW, blkno >> 24));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBAMID, blkno >> 32));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBAHIGH, blkno >> 40));
	}
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBALOW, blkno));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAMID, blkno >> 8));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAHIGH, blkno >> 16));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_DEVICE, head));
	/* The command register write is flagged as the last in the list. */
	crqb->atacommand[i++] = htole16(
	    CRQB_ATACOMMAND(CRQB_ATACOMMAND_COMMAND, cmd) |
	    CRQB_ATACOMMAND_LAST);
}
3425 #endif
3427 static uint32_t
3428 mvsata_read_preamps_gen1(struct mvsata_port *mvport)
3430 struct mvsata_hc *hc = mvport->port_hc;
3431 uint32_t reg;
3433 reg = MVSATA_HC_READ_4(hc, SATAHC_I_PHYMODE(mvport->port));
3435 * [12:11] : pre
3436 * [7:5] : amps
3438 return reg & 0x000018e0;
/*
 * Apply GenI (88SX50xx) PHY errata fixes, then restore the saved
 * pre-emphasis/amplitude settings.
 */
static void
mvsata_fix_phy_gen1(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct mvsata_hc *mvhc = mvport->port_hc;
	uint32_t reg;
	int port = mvport->port, fix_apm_sq = 0;

	/* The affected revisions differ between the 88SX5080 and others. */
	if (sc->sc_model == PCI_PRODUCT_MARVELL_88SX5080) {
		if (sc->sc_rev == 0x01)
			fix_apm_sq = 1;
	} else {
		if (sc->sc_rev == 0x00)
			fix_apm_sq = 1;
	}

	if (fix_apm_sq) {
		/*
		 * Disable auto-power management
		 * 88SX50xx FEr SATA#12
		 */
		reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_LTMODE(port));
		reg |= (1 << 19);
		MVSATA_HC_WRITE_4(mvhc, SATAHC_I_LTMODE(port), reg);

		/*
		 * Fix squelch threshold
		 * 88SX50xx FEr SATA#9
		 */
		reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYCONTROL(port));
		reg &= ~0x3;
		reg |= 0x1;
		MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYCONTROL(port), reg);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYMODE(port));
	reg &= ~0x000018e0;	/* pre and amps mask */
	reg |= mvport->_fix_phy_param.pre_amps;
	MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYMODE(port), reg);
}
/* GenI device-connect hook: re-run the PHY fixup on device attach. */
static void
mvsata_devconn_gen1(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));

	/* Fix for 88SX50xx FEr SATA#2 */
	mvport->_fix_phy_param._fix_phy(mvport);

	/* If disk is connected, then enable the activity LED */
	if (sc->sc_rev == 0x03) {
		/* XXXXX */	/* LED enabling not implemented */
	}
}
3497 static uint32_t
3498 mvsata_read_preamps_gen2(struct mvsata_port *mvport)
3500 uint32_t reg;
3502 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
3504 * [10:8] : amps
3505 * [7:5] : pre
3507 return reg & 0x000007e0;
3510 static void
3511 mvsata_fix_phy_gen2(struct mvsata_port *mvport)
3513 struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
3514 uint32_t reg;
3516 if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) ||
3517 sc->sc_gen == gen2e) {
3519 * Fix for
3520 * 88SX60X1 FEr SATA #23
3521 * 88SX6042/88SX7042 FEr SATA #23
3522 * 88F5182 FEr #SATA-S13
3523 * 88F5082 FEr #SATA-S13
3525 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
3526 reg &= ~(1 << 16);
3527 reg |= (1 << 31);
3528 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg);
3530 delay(200);
3532 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
3533 reg &= ~((1 << 16) | (1 << 31));
3534 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg);
3536 delay(200);
3539 /* Fix values in PHY Mode 3 Register.*/
3540 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3);
3541 reg &= ~0x7F900000;
3542 reg |= 0x2A800000;
3543 /* Implement Guidline 88F5182, 88F5082, 88F6082 (GL# SATA-S11) */
3544 if (sc->sc_model == PCI_PRODUCT_MARVELL_88F5082 ||
3545 sc->sc_model == PCI_PRODUCT_MARVELL_88F5182 ||
3546 sc->sc_model == PCI_PRODUCT_MARVELL_88F6082)
3547 reg &= ~0x0000001c;
3548 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, reg);
3551 * Fix values in PHY Mode 4 Register.
3552 * 88SX60x1 FEr SATA#10
3553 * 88F5182 GL #SATA-S10
3554 * 88F5082 GL #SATA-S10
3556 if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) ||
3557 sc->sc_gen == gen2e) {
3558 uint32_t tmp = 0;
3560 /* 88SX60x1 FEr SATA #13 */
3561 if (sc->sc_gen == 2 && sc->sc_rev == 0x07)
3562 tmp = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3);
3564 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM4);
3565 reg |= (1 << 0);
3566 reg &= ~(1 << 1);
3567 /* PHY Mode 4 Register of Gen IIE has some restriction */
3568 if (sc->sc_gen == gen2e) {
3569 reg &= ~0x5de3fffc;
3570 reg |= (1 << 2);
3572 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM4, reg);
3574 /* 88SX60x1 FEr SATA #13 */
3575 if (sc->sc_gen == 2 && sc->sc_rev == 0x07)
3576 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, tmp);
3579 /* Revert values of pre-emphasis and signal amps to the saved ones */
3580 reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
3581 reg &= ~0x000007e0; /* pre and amps mask */
3582 reg |= mvport->_fix_phy_param.pre_amps;
3583 reg &= ~(1 << 16);
3584 if (sc->sc_gen == gen2e) {
3586 * according to mvSata 3.6.1, some IIE values are fixed.
3587 * some reserved fields must be written with fixed values.
3589 reg &= ~0xC30FF01F;
3590 reg |= 0x0000900F;
3592 MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg);
3595 #ifndef MVSATA_WITHOUTDMA
/*
 * Build a CRQB for a block I/O transfer, GenIIE layout: the taskfile is
 * packed into four 32-bit atacommand words instead of a shadow-register
 * write list.
 *
 * erqqip: request-queue in-pointer slot to fill.
 * quetag: host queue tag of this request (indexes port_reqtbl).
 */
static void
mvsata_edma_setup_crqb_gen2e(struct mvsata_port *mvport, int erqqip, int quetag,
			     struct ata_bio *ata_bio)
{
	struct crqb_gen2e *crqb;
	bus_addr_t eprd_addr;
	daddr_t blkno;
	uint32_t ctrlflg, rw;
	uint8_t cmd, head;
	const int drive =
	    mvport->port_ata_channel.ch_queue->active_xfer->c_drive;

	/* DMA address of this request's ePRD (scatter/gather) table. */
	eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr +
	    mvport->port_reqtbl[quetag].eprd_offset;
	rw = (ata_bio->flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE;
	ctrlflg = (rw | CRQB_CDEVICEQUETAG(quetag) | CRQB_CPMPORT(drive) |
	    CRQB_CPRDMODE_EPRD | CRQB_CHOSTQUETAG_GEN2(quetag));
	cmd = (ata_bio->flags & ATA_READ) ? WDCC_READDMA : WDCC_WRITEDMA;
	head = WDSD_LBA;
	blkno = ata_bio->blkno;
	if (ata_bio->flags & ATA_LBA48)
		cmd = atacmd_to48(cmd);
	else {
		/* LBA28: top 4 bits of the block number go in the head reg. */
		head |= ((ata_bio->blkno >> 24) & 0xf);
		blkno &= 0xffffff;
	}
	crqb = &mvport->port_crqb->crqb_gen2e + erqqip;
	crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK);
	/* ">> 16 >> 16" rather than ">> 32": bus_addr_t may be 32-bit. */
	crqb->cprdbh = htole32((eprd_addr >> 16) >> 16);
	crqb->ctrlflg = htole32(ctrlflg);
	if (mvport->port_edmamode == dma) {
		crqb->atacommand[0] = htole32(cmd << 16);
		crqb->atacommand[1] = htole32((blkno & 0xffffff) | head << 24);
		crqb->atacommand[2] = htole32(((blkno >> 24) & 0xffffff));
		crqb->atacommand[3] = htole32(ata_bio->nblks & 0xffff);
	} else { /* ncq/queued */

		/*
		 * XXXX: Oops, ata command is not correct. And, atabus layer
		 * has not been supported yet now.
		 *        Queued DMA read/write.
		 *        read/write FPDMAQueued.
		 */

		crqb->atacommand[0] = htole32(
		    (cmd << 16) | ((ata_bio->nblks & 0xff) << 24));
		crqb->atacommand[1] = htole32((blkno & 0xffffff) | head << 24);
		crqb->atacommand[2] = htole32(((blkno >> 24) & 0xffffff) |
		    ((ata_bio->nblks >> 8) & 0xff));
		/*
		 * NOTE(review): the first atacommand[3] store is dead --
		 * it is immediately overwritten by the tag below.  Tied to
		 * the unfinished queued-command support flagged above;
		 * confirm the intended word before removing either store.
		 */
		crqb->atacommand[3] = htole32(ata_bio->nblks & 0xffff);
		crqb->atacommand[3] = htole32(quetag << 3);
	}
}
3651 #ifdef MVSATA_DEBUG
/*
 * Hex-dump `size' bytes at `p', labelled "<type>(<n>)", 16 bytes per
 * output line.  Debug-only helper for the queue-dump routines below.
 */
#define MVSATA_DEBUG_PRINT(type, size, n, p)			\
	do {							\
		int _i;						\
		u_char *_p = (p);				\
								\
		printf(#type "(%d)", (n));			\
		for (_i = 0; _i < (size); _i++, _p++) {		\
			if (_i % 16 == 0)			\
				printf("\n   ");		\
			printf(" %02x", *_p);			\
		}						\
		printf("\n");					\
	} while (0 /* CONSTCOND */)
/* Dump CRQB slot `n' of this port's request queue. */
static void
mvsata_print_crqb(struct mvsata_port *mvport, int n)
{

	MVSATA_DEBUG_PRINT(crqb, sizeof(union mvsata_crqb),
	    n, (u_char *)(mvport->port_crqb + n));
}
/* Dump CRPB slot `n' of this port's response queue. */
static void
mvsata_print_crpb(struct mvsata_port *mvport, int n)
{

	MVSATA_DEBUG_PRINT(crpb, sizeof(struct crpb),
	    n, (u_char *)(mvport->port_crpb + n));
}
3682 static void
3683 mvsata_print_eprd(struct mvsata_port *mvport, int n)
3685 struct eprd *eprd;
3686 int i = 0;
3688 eprd = mvport->port_reqtbl[n].eprd;
3689 while (1 /*CONSTCOND*/) {
3690 MVSATA_DEBUG_PRINT(eprd, sizeof(struct eprd),
3691 i, (u_char *)eprd);
3692 if (eprd->eot & EPRD_EOT)
3693 break;
3694 eprd++;
3695 i++;
3698 #endif
3699 #endif