1 /*
2 * sata_sx4.c - Promise SATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
8 * Copyright 2003-2004 Red Hat, Inc.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
29 * Hardware documentation available under NDA.
34 Theory of operation
35 -------------------
37 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
38 engine, DIMM memory, and four ATA engines (one per SATA port).
39 Data is copied to/from DIMM memory by the HDMA engine, before
40 handing off to one (or more) of the ATA engines. The ATA
41 engines operate solely on DIMM memory.
43 The SX4 behaves like a PATA chip, with no SATA controls or
44 knowledge whatsoever, leading to the presumption that
45 PATA<->SATA bridges exist on SX4 boards, external to the
46 PDC20621 chip itself.
48 The chip is quite capable, supporting an XOR engine and linked
49 hardware commands (permits a string of transactions to be
50 submitted and waited-on as a single unit), and an optional
51 microprocessor.
53 The limiting factor is largely software. This Linux driver was
54 written to multiplex the single HDMA engine to copy disk
55 transactions into a fixed DIMM memory space, from where an ATA
56 engine takes over. As a result, each WRITE looks like this:
58 submit HDMA packet to hardware
59 hardware copies data from system memory to DIMM
60 hardware raises interrupt
62 submit ATA packet to hardware
63 hardware executes ATA WRITE command, w/ data in DIMM
64 hardware raises interrupt
66 and each READ looks like this:
68 submit ATA packet to hardware
69 hardware executes ATA READ command, w/ data in DIMM
70 hardware raises interrupt
72 submit HDMA packet to hardware
73 hardware copies data from DIMM to system memory
74 hardware raises interrupt
76 This is a very slow, lock-step way of doing things that can
77 certainly be improved by motivated kernel hackers.
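In this driver, pdc20621_dma_prep() builds the ATA and Host DMA packets
plus their S/G tables in DIMM memory, pdc20621_packet_start() submits
the first step, and pdc20621_host_intr() chains the second step from
the interrupt handler before completing the command.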
81 #include <linux/kernel.h>
82 #include <linux/module.h>
83 #include <linux/pci.h>
84 #include <linux/init.h>
85 #include <linux/blkdev.h>
86 #include <linux/delay.h>
87 #include <linux/interrupt.h>
88 #include <linux/device.h>
89 #include <scsi/scsi_host.h>
90 #include <scsi/scsi_cmnd.h>
91 #include <linux/libata.h>
92 #include "sata_promise.h"
94 #define DRV_NAME "sata_sx4"
95 #define DRV_VERSION "0.12"
98 enum {
99 PDC_MMIO_BAR = 3,
100 PDC_DIMM_BAR = 4,
102 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
104 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
105 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
106 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
107 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
109 PDC_CTLSTAT = 0x60, /* IDEn control / status */
111 PDC_20621_SEQCTL = 0x400,
112 PDC_20621_SEQMASK = 0x480,
113 PDC_20621_GENERAL_CTL = 0x484,
114 PDC_20621_PAGE_SIZE = (32 * 1024),
116 /* chosen, not constant, values; we design our own DIMM mem map */
117 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
118 PDC_20621_DIMM_BASE = 0x00200000,
119 PDC_20621_DIMM_DATA = (64 * 1024),
120 PDC_DIMM_DATA_STEP = (256 * 1024),
121 PDC_DIMM_WINDOW_STEP = (8 * 1024),
122 PDC_DIMM_HOST_PRD = (6 * 1024),
123 PDC_DIMM_HOST_PKT = (128 * 0),
124 PDC_DIMM_HPKT_PRD = (128 * 1),
125 PDC_DIMM_ATA_PKT = (128 * 2),
126 PDC_DIMM_APKT_PRD = (128 * 3),
127 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
128 PDC_PAGE_WINDOW = 0x40,
129 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
130 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
131 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
133 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
135 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
136 (1<<23),
138 board_20621 = 0, /* FastTrak S150 SX4 */
140 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
141 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
142 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
144 PDC_MAX_HDMA = 32,
145 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
147 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
148 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
149 PDC_I2C_CONTROL = 0x48,
150 PDC_I2C_ADDR_DATA = 0x4C,
151 PDC_DIMM0_CONTROL = 0x80,
152 PDC_DIMM1_CONTROL = 0x84,
153 PDC_SDRAM_CONTROL = 0x88,
154 PDC_I2C_WRITE = 0, /* master -> slave */
155 PDC_I2C_READ = (1 << 6), /* master <- slave */
156 PDC_I2C_START = (1 << 7), /* start I2C proto */
157 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
158 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
159 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
160 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
161 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
162 PDC_DIMM_SPD_ROW_NUM = 3,
163 PDC_DIMM_SPD_COLUMN_NUM = 4,
164 PDC_DIMM_SPD_MODULE_ROW = 5,
165 PDC_DIMM_SPD_TYPE = 11,
166 PDC_DIMM_SPD_FRESH_RATE = 12,
167 PDC_DIMM_SPD_BANK_NUM = 17,
168 PDC_DIMM_SPD_CAS_LATENCY = 18,
169 PDC_DIMM_SPD_ATTRIBUTE = 21,
170 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
171 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
172 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
173 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
174 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
175 PDC_CTL_STATUS = 0x08,
176 PDC_DIMM_WINDOW_CTLR = 0x0C,
177 PDC_TIME_CONTROL = 0x3C,
178 PDC_TIME_PERIOD = 0x40,
179 PDC_TIME_COUNTER = 0x44,
180 PDC_GENERAL_CTLR = 0x484,
181 PCI_PLL_INIT = 0x8A531824,
182 PCI_X_TCOUNT = 0xEE1E5CFF,
184 /* PDC_TIME_CONTROL bits */
185 PDC_TIMER_BUZZER = (1 << 10),
186 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
187 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
188 PDC_TIMER_ENABLE = (1 << 7),
189 PDC_TIMER_MASK_INT = (1 << 5),
190 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
191 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
192 PDC_TIMER_ENABLE |
193 PDC_TIMER_MASK_INT,
197 struct pdc_port_priv {
198 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
199 u8 *pkt;
200 dma_addr_t pkt_dma;
203 struct pdc_host_priv {
204 unsigned int doing_hdma;
205 unsigned int hdma_prod;
206 unsigned int hdma_cons;
207 struct {
208 struct ata_queued_cmd *qc;
209 unsigned int seq;
210 unsigned long pkt_ofs;
211 } hdma[32];
215 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
216 static void pdc_eng_timeout(struct ata_port *ap);
217 static void pdc_20621_phy_reset(struct ata_port *ap);
218 static int pdc_port_start(struct ata_port *ap);
219 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
220 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
221 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
222 static unsigned int pdc20621_dimm_init(struct ata_host *host);
223 static int pdc20621_detect_dimm(struct ata_host *host);
224 static unsigned int pdc20621_i2c_read(struct ata_host *host,
225 u32 device, u32 subaddr, u32 *pdata);
226 static int pdc20621_prog_dimm0(struct ata_host *host);
227 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
228 #ifdef ATA_VERBOSE_DEBUG
229 static void pdc20621_get_from_dimm(struct ata_host *host,
230 void *psource, u32 offset, u32 size);
231 #endif
232 static void pdc20621_put_to_dimm(struct ata_host *host,
233 void *psource, u32 offset, u32 size);
234 static void pdc20621_irq_clear(struct ata_port *ap);
235 static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
238 static struct scsi_host_template pdc_sata_sht = {
239 .module = THIS_MODULE,
240 .name = DRV_NAME,
241 .ioctl = ata_scsi_ioctl,
242 .queuecommand = ata_scsi_queuecmd,
243 .can_queue = ATA_DEF_QUEUE,
244 .this_id = ATA_SHT_THIS_ID,
245 .sg_tablesize = LIBATA_MAX_PRD,
246 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
247 .emulated = ATA_SHT_EMULATED,
248 .use_clustering = ATA_SHT_USE_CLUSTERING,
249 .proc_name = DRV_NAME,
250 .dma_boundary = ATA_DMA_BOUNDARY,
251 .slave_configure = ata_scsi_slave_config,
252 .slave_destroy = ata_scsi_slave_destroy,
253 .bios_param = ata_std_bios_param,
256 static const struct ata_port_operations pdc_20621_ops = {
257 .tf_load = pdc_tf_load_mmio,
258 .tf_read = ata_tf_read,
259 .check_status = ata_check_status,
260 .exec_command = pdc_exec_command_mmio,
261 .dev_select = ata_std_dev_select,
262 .phy_reset = pdc_20621_phy_reset,
263 .qc_prep = pdc20621_qc_prep,
264 .qc_issue = pdc20621_qc_issue_prot,
265 .data_xfer = ata_data_xfer,
266 .eng_timeout = pdc_eng_timeout,
267 .irq_clear = pdc20621_irq_clear,
268 .irq_on = ata_irq_on,
269 .port_start = pdc_port_start,
272 static const struct ata_port_info pdc_port_info[] = {
273 /* board_20621 */
275 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
276 ATA_FLAG_SRST | ATA_FLAG_MMIO |
277 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
278 .pio_mask = 0x1f, /* pio0-4 */
279 .mwdma_mask = 0x07, /* mwdma0-2 */
280 .udma_mask = ATA_UDMA6,
281 .port_ops = &pdc_20621_ops,
286 static const struct pci_device_id pdc_sata_pci_tbl[] = {
287 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
289 { } /* terminate list */
292 static struct pci_driver pdc_sata_pci_driver = {
293 .name = DRV_NAME,
294 .id_table = pdc_sata_pci_tbl,
295 .probe = pdc_sata_init_one,
296 .remove = ata_pci_remove_one,
300 static int pdc_port_start(struct ata_port *ap)
302 struct device *dev = ap->host->dev;
303 struct pdc_port_priv *pp;
304 int rc;
306 rc = ata_port_start(ap);
307 if (rc)
308 return rc;
310 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
311 if (!pp)
312 return -ENOMEM;
314 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
315 if (!pp->pkt)
316 return -ENOMEM;
318 ap->private_data = pp;
320 return 0;
323 static void pdc_20621_phy_reset(struct ata_port *ap)
325 VPRINTK("ENTER\n");
326 ap->cbl = ATA_CBL_SATA;
327 ata_port_probe(ap);
328 ata_bus_reset(ap);
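/*
 * A sketch of the per-port DIMM layout used by the packet/S/G builders
 * below, derived from the PDC_DIMM_* constants above:
 *
 *   header area:  PDC_20621_DIMM_BASE + (portno * PDC_DIMM_WINDOW_STEP)
 *     +0x000   Host DMA packet             (PDC_DIMM_HOST_PKT)
 *     +0x080   Host DMA S/G, DIMM side     (PDC_DIMM_HPKT_PRD)
 *     +0x100   ATA command packet          (PDC_DIMM_ATA_PKT)
 *     +0x180   ATA S/G, DIMM side          (PDC_DIMM_APKT_PRD)
 *     +0x1800  Host DMA S/G, system side   (PDC_DIMM_HOST_PRD)
 *
 *   data area:    PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA
 *                 + (portno * PDC_DIMM_DATA_STEP), 256 KB per port
 */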
331 static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
332 unsigned int portno,
333 unsigned int total_len)
335 u32 addr;
336 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
337 __le32 *buf32 = (__le32 *) buf;
339 /* output ATA packet S/G table */
340 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
341 (PDC_DIMM_DATA_STEP * portno);
342 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
343 buf32[dw] = cpu_to_le32(addr);
344 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
346 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
347 PDC_20621_DIMM_BASE +
348 (PDC_DIMM_WINDOW_STEP * portno) +
349 PDC_DIMM_APKT_PRD,
350 buf32[dw], buf32[dw + 1]);
353 static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
354 unsigned int portno,
355 unsigned int total_len)
357 u32 addr;
358 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
359 __le32 *buf32 = (__le32 *) buf;
361 /* output Host DMA packet S/G table */
362 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
363 (PDC_DIMM_DATA_STEP * portno);
365 buf32[dw] = cpu_to_le32(addr);
366 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
368 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
369 PDC_20621_DIMM_BASE +
370 (PDC_DIMM_WINDOW_STEP * portno) +
371 PDC_DIMM_HPKT_PRD,
372 buf32[dw], buf32[dw + 1]);
375 static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
376 unsigned int devno, u8 *buf,
377 unsigned int portno)
379 unsigned int i, dw;
380 __le32 *buf32 = (__le32 *) buf;
381 u8 dev_reg;
383 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
384 (PDC_DIMM_WINDOW_STEP * portno) +
385 PDC_DIMM_APKT_PRD;
386 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
388 i = PDC_DIMM_ATA_PKT;
391 * Set up ATA packet
393 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
394 buf[i++] = PDC_PKT_READ;
395 else if (tf->protocol == ATA_PROT_NODATA)
396 buf[i++] = PDC_PKT_NODATA;
397 else
398 buf[i++] = 0;
399 buf[i++] = 0; /* reserved */
400 buf[i++] = portno + 1; /* seq. id */
401 buf[i++] = 0xff; /* delay seq. id */
403 /* dimm dma S/G, and next-pkt */
404 dw = i >> 2;
405 if (tf->protocol == ATA_PROT_NODATA)
406 buf32[dw] = 0;
407 else
408 buf32[dw] = cpu_to_le32(dimm_sg);
409 buf32[dw + 1] = 0;
410 i += 8;
412 if (devno == 0)
413 dev_reg = ATA_DEVICE_OBS;
414 else
415 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
417 /* select device */
418 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
419 buf[i++] = dev_reg;
421 /* device control register */
422 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
423 buf[i++] = tf->ctl;
425 return i;
428 static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
429 unsigned int portno)
431 unsigned int dw;
432 u32 tmp;
433 __le32 *buf32 = (__le32 *) buf;
435 unsigned int host_sg = PDC_20621_DIMM_BASE +
436 (PDC_DIMM_WINDOW_STEP * portno) +
437 PDC_DIMM_HOST_PRD;
438 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
439 (PDC_DIMM_WINDOW_STEP * portno) +
440 PDC_DIMM_HPKT_PRD;
441 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
442 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
444 dw = PDC_DIMM_HOST_PKT >> 2;
447 * Set up Host DMA packet
449 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
450 tmp = PDC_PKT_READ;
451 else
452 tmp = 0;
453 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
454 tmp |= (0xff << 24); /* delay seq. id */
455 buf32[dw + 0] = cpu_to_le32(tmp);
456 buf32[dw + 1] = cpu_to_le32(host_sg);
457 buf32[dw + 2] = cpu_to_le32(dimm_sg);
458 buf32[dw + 3] = 0;
460 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
461 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
462 PDC_DIMM_HOST_PKT,
463 buf32[dw + 0],
464 buf32[dw + 1],
465 buf32[dw + 2],
466 buf32[dw + 3]);
469 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
471 struct scatterlist *sg;
472 struct ata_port *ap = qc->ap;
473 struct pdc_port_priv *pp = ap->private_data;
474 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
475 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
476 unsigned int portno = ap->port_no;
477 unsigned int i, si, idx, total_len = 0, sgt_len;
478 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
480 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
482 VPRINTK("ata%u: ENTER\n", ap->print_id);
484 /* hard-code chip #0 */
485 mmio += PDC_CHIP0_OFS;
488 * Build S/G table
490 idx = 0;
491 for_each_sg(qc->sg, sg, qc->n_elem, si) {
492 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
493 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
494 total_len += sg_dma_len(sg);
496 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
497 sgt_len = idx * 4;
500 * Build ATA, host DMA packets
502 pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
503 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
505 pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
506 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
508 if (qc->tf.flags & ATA_TFLAG_LBA48)
509 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
510 else
511 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
513 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
515 /* copy three S/G tables and two packets to DIMM MMIO window */
516 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
517 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
518 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
519 PDC_DIMM_HOST_PRD,
520 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
522 /* force host FIFO dump */
523 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
525 readl(dimm_mmio); /* MMIO PCI posting flush */
527 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
530 static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
532 struct ata_port *ap = qc->ap;
533 struct pdc_port_priv *pp = ap->private_data;
534 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
535 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
536 unsigned int portno = ap->port_no;
537 unsigned int i;
539 VPRINTK("ata%u: ENTER\n", ap->print_id);
541 /* hard-code chip #0 */
542 mmio += PDC_CHIP0_OFS;
544 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
546 if (qc->tf.flags & ATA_TFLAG_LBA48)
547 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
548 else
549 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
551 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
553 /* copy ATA packet to DIMM MMIO window (no data, so no S/G tables are needed) */
554 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
555 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
557 /* force host FIFO dump */
558 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
560 readl(dimm_mmio); /* MMIO PCI posting flush */
562 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
565 static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
567 switch (qc->tf.protocol) {
568 case ATA_PROT_DMA:
569 pdc20621_dma_prep(qc);
570 break;
571 case ATA_PROT_NODATA:
572 pdc20621_nodata_prep(qc);
573 break;
574 default:
575 break;
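/*
 * Only one Host DMA transaction can be in flight at a time, so HDMA
 * submissions are funnelled through a small software ring:
 * pdc20621_push_hdma() either starts the packet immediately (engine
 * idle) or queues it in pdc_host_priv.hdma[], and pdc20621_pop_hdma()
 * kicks off the next queued packet from the interrupt path.  The ring
 * holds PDC_MAX_HDMA entries and is indexed with PDC_HDMA_Q_MASK.
 */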
579 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
580 unsigned int seq,
581 u32 pkt_ofs)
583 struct ata_port *ap = qc->ap;
584 struct ata_host *host = ap->host;
585 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
587 /* hard-code chip #0 */
588 mmio += PDC_CHIP0_OFS;
590 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
591 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
593 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
594 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
597 static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
598 unsigned int seq,
599 u32 pkt_ofs)
601 struct ata_port *ap = qc->ap;
602 struct pdc_host_priv *pp = ap->host->private_data;
603 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
605 if (!pp->doing_hdma) {
606 __pdc20621_push_hdma(qc, seq, pkt_ofs);
607 pp->doing_hdma = 1;
608 return;
611 pp->hdma[idx].qc = qc;
612 pp->hdma[idx].seq = seq;
613 pp->hdma[idx].pkt_ofs = pkt_ofs;
614 pp->hdma_prod++;
617 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
619 struct ata_port *ap = qc->ap;
620 struct pdc_host_priv *pp = ap->host->private_data;
621 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
623 /* if nothing on queue, we're done */
624 if (pp->hdma_prod == pp->hdma_cons) {
625 pp->doing_hdma = 0;
626 return;
629 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
630 pp->hdma[idx].pkt_ofs);
631 pp->hdma_cons++;
634 #ifdef ATA_VERBOSE_DEBUG
635 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
637 struct ata_port *ap = qc->ap;
638 unsigned int port_no = ap->port_no;
639 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
641 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
642 dimm_mmio += PDC_DIMM_HOST_PKT;
644 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
645 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
646 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
647 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
649 #else
650 static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
651 #endif /* ATA_VERBOSE_DEBUG */
653 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
655 struct ata_port *ap = qc->ap;
656 struct ata_host *host = ap->host;
657 unsigned int port_no = ap->port_no;
658 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
659 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
660 u8 seq = (u8) (port_no + 1);
661 unsigned int port_ofs;
663 /* hard-code chip #0 */
664 mmio += PDC_CHIP0_OFS;
666 VPRINTK("ata%u: ENTER\n", ap->print_id);
668 wmb(); /* flush PRD, pkt writes */
670 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
672 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
673 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
674 seq += 4;
676 pdc20621_dump_hdma(qc);
677 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
678 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
679 port_ofs + PDC_DIMM_HOST_PKT,
680 port_ofs + PDC_DIMM_HOST_PKT,
681 seq);
682 } else {
683 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
684 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
686 writel(port_ofs + PDC_DIMM_ATA_PKT,
687 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
688 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
689 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
690 port_ofs + PDC_DIMM_ATA_PKT,
691 port_ofs + PDC_DIMM_ATA_PKT,
692 seq);
696 static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
698 switch (qc->tf.protocol) {
699 case ATA_PROT_DMA:
700 case ATA_PROT_NODATA:
701 pdc20621_packet_start(qc);
702 return 0;
704 case ATAPI_PROT_DMA:
705 BUG();
706 break;
708 default:
709 break;
712 return ata_qc_issue_prot(qc);
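/*
 * Per-command interrupt flow (see "Theory of operation" above): a DMA
 * WRITE first raises an HDMA interrupt (data has been copied into the
 * DIMM), after which the ATA packet is submitted; a DMA READ first
 * raises an ATA interrupt (the drive has transferred data into the
 * DIMM), after which an HDMA packet is pushed to copy it back to
 * system memory.  The second interrupt of each pair completes the qc.
 */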
715 static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
716 struct ata_queued_cmd *qc,
717 unsigned int doing_hdma,
718 void __iomem *mmio)
720 unsigned int port_no = ap->port_no;
721 unsigned int port_ofs =
722 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
723 u8 status;
724 unsigned int handled = 0;
726 VPRINTK("ENTER\n");
728 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
729 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
731 /* step two - DMA from DIMM to host */
732 if (doing_hdma) {
733 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
734 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
735 /* get drive status; clear intr; complete txn */
736 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
737 ata_qc_complete(qc);
738 pdc20621_pop_hdma(qc);
741 /* step one - exec ATA command */
742 else {
743 u8 seq = (u8) (port_no + 1 + 4);
744 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
745 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
747 /* submit hdma pkt */
748 pdc20621_dump_hdma(qc);
749 pdc20621_push_hdma(qc, seq,
750 port_ofs + PDC_DIMM_HOST_PKT);
752 handled = 1;
754 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
756 /* step one - DMA from host to DIMM */
757 if (doing_hdma) {
758 u8 seq = (u8) (port_no + 1);
759 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
760 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
762 /* submit ata pkt */
763 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
764 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
765 writel(port_ofs + PDC_DIMM_ATA_PKT,
766 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
767 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
770 /* step two - execute ATA command */
771 else {
772 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
773 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
774 /* get drive status; clear intr; complete txn */
775 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
776 ata_qc_complete(qc);
777 pdc20621_pop_hdma(qc);
779 handled = 1;
781 /* command completion, but no data xfer */
782 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
784 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
785 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
786 qc->err_mask |= ac_err_mask(status);
787 ata_qc_complete(qc);
788 handled = 1;
790 } else {
791 ap->stats.idle_irq++;
794 return handled;
797 static void pdc20621_irq_clear(struct ata_port *ap)
799 struct ata_host *host = ap->host;
800 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
802 mmio += PDC_CHIP0_OFS;
804 readl(mmio + PDC_20621_SEQMASK);
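/*
 * The SEQ interrupt mask has one bit per sequence ID.  The packet
 * builders assign seq = port_no + 1 for ATA packets and port_no + 5
 * for HDMA packets, so the loop below treats IDs 1-4 as ATA
 * completions and IDs 5-8 as HDMA completions for the same ports
 * (doing_hdma set when i > 4).
 */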
807 static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
809 struct ata_host *host = dev_instance;
810 struct ata_port *ap;
811 u32 mask = 0;
812 unsigned int i, tmp, port_no;
813 unsigned int handled = 0;
814 void __iomem *mmio_base;
816 VPRINTK("ENTER\n");
818 if (!host || !host->iomap[PDC_MMIO_BAR]) {
819 VPRINTK("QUICK EXIT\n");
820 return IRQ_NONE;
823 mmio_base = host->iomap[PDC_MMIO_BAR];
825 /* reading should also clear interrupts */
826 mmio_base += PDC_CHIP0_OFS;
827 mask = readl(mmio_base + PDC_20621_SEQMASK);
828 VPRINTK("mask == 0x%x\n", mask);
830 if (mask == 0xffffffff) {
831 VPRINTK("QUICK EXIT 2\n");
832 return IRQ_NONE;
834 mask &= 0xffff; /* only 16 tags possible */
835 if (!mask) {
836 VPRINTK("QUICK EXIT 3\n");
837 return IRQ_NONE;
840 spin_lock(&host->lock);
842 for (i = 1; i < 9; i++) {
843 port_no = i - 1;
844 if (port_no > 3)
845 port_no -= 4;
846 if (port_no >= host->n_ports)
847 ap = NULL;
848 else
849 ap = host->ports[port_no];
850 tmp = mask & (1 << i);
851 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
852 if (tmp && ap &&
853 !(ap->flags & ATA_FLAG_DISABLED)) {
854 struct ata_queued_cmd *qc;
856 qc = ata_qc_from_tag(ap, ap->link.active_tag);
857 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
858 handled += pdc20621_host_intr(ap, qc, (i > 4),
859 mmio_base);
863 spin_unlock(&host->lock);
865 VPRINTK("mask == 0x%x\n", mask);
867 VPRINTK("EXIT\n");
869 return IRQ_RETVAL(handled);
872 static void pdc_eng_timeout(struct ata_port *ap)
874 u8 drv_stat;
875 struct ata_host *host = ap->host;
876 struct ata_queued_cmd *qc;
877 unsigned long flags;
879 DPRINTK("ENTER\n");
881 spin_lock_irqsave(&host->lock, flags);
883 qc = ata_qc_from_tag(ap, ap->link.active_tag);
885 switch (qc->tf.protocol) {
886 case ATA_PROT_DMA:
887 case ATA_PROT_NODATA:
888 ata_port_printk(ap, KERN_ERR, "command timeout\n");
889 qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
890 break;
892 default:
893 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
895 ata_port_printk(ap, KERN_ERR,
896 "unknown timeout, cmd 0x%x stat 0x%x\n",
897 qc->tf.command, drv_stat);
899 qc->err_mask |= ac_err_mask(drv_stat);
900 break;
903 spin_unlock_irqrestore(&host->lock, flags);
904 ata_eh_qc_complete(qc);
905 DPRINTK("EXIT\n");
908 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
910 WARN_ON(tf->protocol == ATA_PROT_DMA ||
911 tf->protocol == ATA_PROT_NODATA);
912 ata_tf_load(ap, tf);
916 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
918 WARN_ON(tf->protocol == ATA_PROT_DMA ||
919 tf->protocol == ATA_PROT_NODATA);
920 ata_exec_command(ap, tf);
924 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
926 port->cmd_addr = base;
927 port->data_addr = base;
928 port->feature_addr =
929 port->error_addr = base + 0x4;
930 port->nsect_addr = base + 0x8;
931 port->lbal_addr = base + 0xc;
932 port->lbam_addr = base + 0x10;
933 port->lbah_addr = base + 0x14;
934 port->device_addr = base + 0x18;
935 port->command_addr =
936 port->status_addr = base + 0x1c;
937 port->altstatus_addr =
938 port->ctl_addr = base + 0x38;
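/*
 * The on-board DIMM is reached through a single 32K window in the DIMM
 * BAR; PDC_DIMM_WINDOW_CTLR selects which 32K page of DIMM memory the
 * window currently shows.  The two helpers below therefore split
 * larger transfers at window boundaries and advance the page as they
 * go.
 */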
942 #ifdef ATA_VERBOSE_DEBUG
943 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
944 u32 offset, u32 size)
946 u32 window_size;
947 u16 idx;
948 u8 page_mask;
949 long dist;
950 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
951 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
953 /* hard-code chip #0 */
954 mmio += PDC_CHIP0_OFS;
956 page_mask = 0x00;
957 window_size = 0x2000 * 4; /* 32 KB window */
958 idx = (u16) (offset / window_size);
960 writel(0x01, mmio + PDC_GENERAL_CTLR);
961 readl(mmio + PDC_GENERAL_CTLR);
962 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
963 readl(mmio + PDC_DIMM_WINDOW_CTLR);
965 offset -= (idx * window_size);
966 idx++;
967 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
968 (long) (window_size - offset);
969 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
970 dist);
972 psource += dist;
973 size -= dist;
974 for (; (long) size >= (long) window_size ;) {
975 writel(0x01, mmio + PDC_GENERAL_CTLR);
976 readl(mmio + PDC_GENERAL_CTLR);
977 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
978 readl(mmio + PDC_DIMM_WINDOW_CTLR);
979 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
980 window_size / 4);
981 psource += window_size;
982 size -= window_size;
983 idx++;
986 if (size) {
987 writel(0x01, mmio + PDC_GENERAL_CTLR);
988 readl(mmio + PDC_GENERAL_CTLR);
989 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
990 readl(mmio + PDC_DIMM_WINDOW_CTLR);
991 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
992 size / 4);
995 #endif
998 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
999 u32 offset, u32 size)
1001 u32 window_size;
1002 u16 idx;
1003 u8 page_mask;
1004 long dist;
1005 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1006 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1008 /* hard-code chip #0 */
1009 mmio += PDC_CHIP0_OFS;
1011 page_mask = 0x00;
1012 window_size = 0x2000 * 4; /* 32 KB window */
1013 idx = (u16) (offset / window_size);
1015 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1016 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1017 offset -= (idx * window_size);
1018 idx++;
1019 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1020 (long) (window_size - offset);
1021 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1022 writel(0x01, mmio + PDC_GENERAL_CTLR);
1023 readl(mmio + PDC_GENERAL_CTLR);
1025 psource += dist;
1026 size -= dist;
1027 for (; (long) size >= (long) window_size ;) {
1028 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1029 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1030 memcpy_toio(dimm_mmio, psource, window_size / 4);
1031 writel(0x01, mmio + PDC_GENERAL_CTLR);
1032 readl(mmio + PDC_GENERAL_CTLR);
1033 psource += window_size;
1034 size -= window_size;
1035 idx++;
1038 if (size) {
1039 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1040 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1041 memcpy_toio(dimm_mmio, psource, size / 4);
1042 writel(0x01, mmio + PDC_GENERAL_CTLR);
1043 readl(mmio + PDC_GENERAL_CTLR);
1048 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1049 u32 subaddr, u32 *pdata)
1051 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1052 u32 i2creg = 0;
1053 u32 status;
1054 u32 count = 0;
1056 /* hard-code chip #0 */
1057 mmio += PDC_CHIP0_OFS;
1059 i2creg |= device << 24;
1060 i2creg |= subaddr << 16;
1062 /* Set the device and subaddress */
1063 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1064 readl(mmio + PDC_I2C_ADDR_DATA);
1066 /* Write Control to perform read operation, mask int */
1067 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1068 mmio + PDC_I2C_CONTROL);
1070 for (count = 0; count <= 1000; count ++) {
1071 status = readl(mmio + PDC_I2C_CONTROL);
1072 if (status & PDC_I2C_COMPLETE) {
1073 status = readl(mmio + PDC_I2C_ADDR_DATA);
1074 break;
1075 } else if (count == 1000)
1076 return 0;
1079 *pdata = (status >> 8) & 0x000000ff;
1080 return 1;
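/*
 * DIMM speed detection: SPD byte 126 (PDC_DIMM_SPD_SYSTEM_FREQ) equal
 * to 100 identifies a 100 MHz module; otherwise an SPD byte 9 (SDRAM
 * cycle time) value of 0x75 (7.5 ns) or less is taken to mean the
 * module can run at 133 MHz.  Anything else, or an I2C failure, is
 * treated as "no usable DIMM".
 */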
1084 static int pdc20621_detect_dimm(struct ata_host *host)
1086 u32 data = 0;
1087 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1088 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1089 if (data == 100)
1090 return 100;
1091 } else
1092 return 0;
1094 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1095 if (data <= 0x75)
1096 return 133;
1097 } else
1098 return 0;
1100 return 0;
1104 static int pdc20621_prog_dimm0(struct ata_host *host)
1106 u32 spd0[50];
1107 u32 data = 0;
1108 int size, i;
1109 u8 bdimmsize;
1110 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1111 static const struct {
1112 unsigned int reg;
1113 unsigned int ofs;
1114 } pdc_i2c_read_data [] = {
1115 { PDC_DIMM_SPD_TYPE, 11 },
1116 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1117 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1118 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1119 { PDC_DIMM_SPD_ROW_NUM, 3 },
1120 { PDC_DIMM_SPD_BANK_NUM, 17 },
1121 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1122 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1123 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1124 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1125 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1126 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1129 /* hard-code chip #0 */
1130 mmio += PDC_CHIP0_OFS;
1132 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1133 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1134 pdc_i2c_read_data[i].reg,
1135 &spd0[pdc_i2c_read_data[i].ofs]);
1137 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1138 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1139 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1140 data |= (((((spd0[29] > spd0[28])
1141 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1142 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1144 if (spd0[18] & 0x08)
1145 data |= ((0x03) << 14);
1146 else if (spd0[18] & 0x04)
1147 data |= ((0x02) << 14);
1148 else if (spd0[18] & 0x01)
1149 data |= ((0x01) << 14);
1150 else
1151 data |= (0 << 14);
1154 Calculate the DIMM size from the SPD geometry bytes (bdimmsize is
1155 log2 of the size in bytes) and encode it into the DIMM0 control register.
1158 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1159 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1160 data |= (((size / 16) - 1) << 16);
1161 data |= (0 << 23);
1162 data |= 8;
1163 writel(data, mmio + PDC_DIMM0_CONTROL);
1164 readl(mmio + PDC_DIMM0_CONTROL);
1165 return size;
1169 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1171 u32 data, spd0;
1172 int error, i;
1173 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1175 /* hard-code chip #0 */
1176 mmio += PDC_CHIP0_OFS;
1179 Set the DIMM Module Global Control Register to its default (0x022259F1):
1180 DIMM Arbitration Disable (bit 20),
1181 DIMM Data/Control Output Driving Selection (bits 12-15),
1182 Refresh Enable (bit 17).
1185 data = 0x022259F1;
1186 writel(data, mmio + PDC_SDRAM_CONTROL);
1187 readl(mmio + PDC_SDRAM_CONTROL);
1189 /* Turn on ECC if the DIMM provides it (SPD config type == 0x02) */
1190 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1191 PDC_DIMM_SPD_TYPE, &spd0);
1192 if (spd0 == 0x02) {
1193 data |= (0x01 << 16);
1194 writel(data, mmio + PDC_SDRAM_CONTROL);
1195 readl(mmio + PDC_SDRAM_CONTROL);
1196 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1199 /* DIMM Initialization Select/Enable (bit 18/19) */
1200 data &= (~(1<<18));
1201 data |= (1<<19);
1202 writel(data, mmio + PDC_SDRAM_CONTROL);
1204 error = 1;
1205 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1206 data = readl(mmio + PDC_SDRAM_CONTROL);
1207 if (!(data & (1<<19))) {
1208 error = 0;
1209 break;
1211 msleep(i*100);
1213 return error;
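/*
 * pdc20621_dimm_init() lets the chip's timer count down from
 * 0xffffffff for three seconds to measure the bus clock; when the
 * count indicates a PCI-X bus, the PLL F parameter is computed from
 * the measured rate, otherwise the fixed PCI_PLL_INIT value is
 * written.  It then reads the DIMM's SPD over I2C to detect speed and
 * size, programs the SDRAM controller, and finally zero-fills the
 * DIMM when an ECC module is detected so the ECC bits start out valid.
 */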
1217 static unsigned int pdc20621_dimm_init(struct ata_host *host)
1219 int speed, size, length;
1220 u32 addr, spd0, pci_status;
1221 u32 tmp = 0;
1222 u32 time_period = 0;
1223 u32 tcount = 0;
1224 u32 ticks = 0;
1225 u32 clock = 0;
1226 u32 fparam = 0;
1227 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1229 /* hard-code chip #0 */
1230 mmio += PDC_CHIP0_OFS;
1232 /* Initialize PLL based upon PCI Bus Frequency */
1234 /* Initialize Time Period Register */
1235 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1236 time_period = readl(mmio + PDC_TIME_PERIOD);
1237 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1239 /* Enable timer */
1240 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1241 readl(mmio + PDC_TIME_CONTROL);
1243 /* Wait 3 seconds */
1244 msleep(3000);
1247 When the timer is enabled, the counter is decremented once per
1248 internal clock cycle.
1251 tcount = readl(mmio + PDC_TIME_COUNTER);
1252 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1255 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1256 register should be >= (0xffffffff - 3x10^8).
1258 if (tcount >= PCI_X_TCOUNT) {
1259 ticks = (time_period - tcount);
1260 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1262 clock = (ticks / 300000);
1263 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1265 clock = (clock * 33);
1266 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1268 /* PLL F Param (bit 22:16) */
1269 fparam = (1400000 / clock) - 2;
1270 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1272 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1273 pci_status = (0x8a001824 | (fparam << 16));
1274 } else
1275 pci_status = PCI_PLL_INIT;
1277 /* Initialize PLL. */
1278 VPRINTK("pci_status: 0x%x\n", pci_status);
1279 writel(pci_status, mmio + PDC_CTL_STATUS);
1280 readl(mmio + PDC_CTL_STATUS);
1283 Read the DIMM's SPD over the I2C interface,
1284 and program the DIMM Module Controller.
1286 if (!(speed = pdc20621_detect_dimm(host))) {
1287 printk(KERN_ERR "Detect Local DIMM Fail\n");
1288 return 1; /* DIMM error */
1290 VPRINTK("Local DIMM Speed = %d\n", speed);
1292 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1293 size = pdc20621_prog_dimm0(host);
1294 VPRINTK("Local DIMM Size = %dMB\n", size);
1296 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1297 if (pdc20621_prog_dimm_global(host)) {
1298 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1299 return 1;
1302 #ifdef ATA_VERBOSE_DEBUG
1304 u8 test_pattern1[40] =
1305 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1306 'N','o','t',' ','Y','e','t',' ',
1307 'D','e','f','i','n','e','d',' ',
1308 '1','.','1','0',
1309 '9','8','0','3','1','6','1','2',0,0};
1310 u8 test_pattern2[40] = {0};
1312 pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
1313 pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
1315 pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
1316 pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
1317 printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1318 test_pattern2[1], &(test_pattern2[2]));
1319 pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
1320 40);
1321 printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1322 test_pattern2[1], &(test_pattern2[2]));
1324 pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
1325 pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
1326 printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
1327 test_pattern2[1], &(test_pattern2[2]));
1329 #endif
1331 /* ECC initialization. */
1333 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1334 PDC_DIMM_SPD_TYPE, &spd0);
1335 if (spd0 == 0x02) {
1336 VPRINTK("Start ECC initialization\n");
1337 addr = 0;
1338 length = size * 1024 * 1024;
1339 while (addr < length) {
1340 pdc20621_put_to_dimm(host, (void *) &tmp, addr,
1341 sizeof(u32));
1342 addr += sizeof(u32);
1344 VPRINTK("Finish ECC initialization\n");
1346 return 0;
1350 static void pdc_20621_init(struct ata_host *host)
1352 u32 tmp;
1353 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1355 /* hard-code chip #0 */
1356 mmio += PDC_CHIP0_OFS;
1359 * Select page 0x40 for our 32k DIMM window
1361 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1362 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1363 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1366 * Reset Host DMA
1368 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1369 tmp |= PDC_RESET;
1370 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1371 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1373 udelay(10);
1375 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1376 tmp &= ~PDC_RESET;
1377 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1378 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
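/*
 * Probe: map BAR 3 (MMIO) and BAR 4 (DIMM window), set up the four
 * ports' taskfile addresses inside the chip-0 register block, bring up
 * the on-board DIMM (pdc20621_dimm_init), select the DIMM window page
 * and reset the HDMA engine (pdc_20621_init), then register with
 * libata.
 */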
1381 static int pdc_sata_init_one(struct pci_dev *pdev,
1382 const struct pci_device_id *ent)
1384 static int printed_version;
1385 const struct ata_port_info *ppi[] =
1386 { &pdc_port_info[ent->driver_data], NULL };
1387 struct ata_host *host;
1388 struct pdc_host_priv *hpriv;
1389 int i, rc;
1391 if (!printed_version++)
1392 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1394 /* allocate host */
1395 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1396 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1397 if (!host || !hpriv)
1398 return -ENOMEM;
1400 host->private_data = hpriv;
1402 /* acquire resources and fill host */
1403 rc = pcim_enable_device(pdev);
1404 if (rc)
1405 return rc;
1407 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1408 DRV_NAME);
1409 if (rc == -EBUSY)
1410 pcim_pin_device(pdev);
1411 if (rc)
1412 return rc;
1413 host->iomap = pcim_iomap_table(pdev);
1415 for (i = 0; i < 4; i++) {
1416 struct ata_port *ap = host->ports[i];
1417 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1418 unsigned int offset = 0x200 + i * 0x80;
1420 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1422 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1423 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1424 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1427 /* configure and activate */
1428 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1429 if (rc)
1430 return rc;
1431 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1432 if (rc)
1433 return rc;
1435 if (pdc20621_dimm_init(host))
1436 return -ENOMEM;
1437 pdc_20621_init(host);
1439 pci_set_master(pdev);
1440 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1441 IRQF_SHARED, &pdc_sata_sht);
1445 static int __init pdc_sata_init(void)
1447 return pci_register_driver(&pdc_sata_pci_driver);
1451 static void __exit pdc_sata_exit(void)
1453 pci_unregister_driver(&pdc_sata_pci_driver);
1457 MODULE_AUTHOR("Jeff Garzik");
1458 MODULE_DESCRIPTION("Promise SATA low-level driver");
1459 MODULE_LICENSE("GPL");
1460 MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1461 MODULE_VERSION(DRV_VERSION);
1463 module_init(pdc_sata_init);
1464 module_exit(pdc_sata_exit);