// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  pdc_adma.c - Pacific Digital Corporation ADMA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *
 *  Copyright 2005 Mark Lord
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Supports ATA disks in single-packet ADMA mode.
 *  Uses PIO for everything else.
 *
 *  TODO:  Use ADMA transfers for ATAPI devices, when possible.
 *  This requires careful attention to a number of quirks of the chip.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"pdc_adma"
#define DRV_VERSION	"1.0"
/* macro to calculate base address for ATA regs */
#define ADMA_ATA_REGS(base, port_no)	((base) + ((port_no) * 0x40))

/* macro to calculate base address for ADMA regs */
#define ADMA_REGS(base, port_no)	((base) + 0x80 + ((port_no) * 0x20))

/* macro to obtain addresses from ata_port */
#define ADMA_PORT_REGS(ap) \
	ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no)
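
/*
 * MMIO layout implied by the macros above, for the two-port chip:
 *
 *	port 0 ATA taskfile regs:	base + 0x00
 *	port 1 ATA taskfile regs:	base + 0x40
 *	port 0 ADMA channel regs:	base + 0x80
 *	port 1 ADMA channel regs:	base + 0xa0
 */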
enum {
	ADMA_MMIO_BAR		= 4,

	ADMA_PORTS		= 2,
	ADMA_CPB_BYTES		= 40,
	ADMA_PRD_BYTES		= LIBATA_MAX_PRD * 16,
	ADMA_PKT_BYTES		= ADMA_CPB_BYTES + ADMA_PRD_BYTES,

	ADMA_DMA_BOUNDARY	= 0xffffffff,

	/* global register offsets */
	ADMA_MODE_LOCK		= 0x00c7,

	/* per-channel register offsets */
	ADMA_CONTROL		= 0x0000, /* ADMA control */
	ADMA_STATUS		= 0x0002, /* ADMA status */
	ADMA_CPB_COUNT		= 0x0004, /* CPB count */
	ADMA_CPB_CURRENT	= 0x000c, /* current CPB address */
	ADMA_CPB_NEXT		= 0x000c, /* next CPB address */
	ADMA_CPB_LOOKUP		= 0x0010, /* CPB lookup table */
	ADMA_FIFO_IN		= 0x0014, /* input FIFO threshold */
	ADMA_FIFO_OUT		= 0x0016, /* output FIFO threshold */

	/* ADMA_CONTROL register bits */
	aNIEN			= (1 << 8), /* irq mask: 1==masked */
	aGO			= (1 << 7), /* packet trigger ("Go!") */
	aRSTADM			= (1 << 5), /* ADMA logic reset */
	aPIOMD4			= 0x0003,   /* PIO mode 4 */

	/* ADMA_STATUS register bits */

	/* ATA register flags */

	/* ATA register addresses */
	ADMA_REGS_CONTROL	= 0x0e,
	ADMA_REGS_SECTOR_COUNT	= 0x12,
	ADMA_REGS_LBA_LOW	= 0x13,
	ADMA_REGS_LBA_MID	= 0x14,
	ADMA_REGS_LBA_HIGH	= 0x15,
	ADMA_REGS_DEVICE	= 0x16,
	ADMA_REGS_COMMAND	= 0x17,

	board_1841_idx		= 0,	/* ADMA 2-port controller */
};
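
/*
 * Per-port software state, kept in adma_port_priv below:
 *
 *	adma_state_idle - no command in flight
 *	adma_state_pkt  - an ADMA packet (CPB) was started with aGO;
 *			  completion is picked up by adma_intr_pkt()
 *	adma_state_mmio - the command went out through the normal SFF
 *			  taskfile path; completion is picked up by
 *			  adma_intr_mmio()
 */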
typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
struct adma_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
	adma_state_t		state;
};
static int adma_ata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_port_stop(struct ata_port *ap);
static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void adma_freeze(struct ata_port *ap);
static void adma_thaw(struct ata_port *ap);
static int adma_prereset(struct ata_link *link, unsigned long deadline);
static const struct scsi_host_template adma_ata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ADMA_DMA_BOUNDARY,
};
static struct ata_port_operations adma_ata_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.check_atapi_dma	= adma_check_atapi_dma,
	.qc_prep		= adma_qc_prep,
	.qc_issue		= adma_qc_issue,

	.freeze			= adma_freeze,
	.thaw			= adma_thaw,
	.prereset		= adma_prereset,

	.port_start		= adma_port_start,
	.port_stop		= adma_port_stop,
};
static struct ata_port_info adma_port_info[] = {
	/* board_1841_idx */
	{
		.flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4_ONLY,
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &adma_ata_ops,
	},
};
static const struct pci_device_id adma_ata_pci_tbl[] = {
	{ PCI_VDEVICE(PDC, 0x1841), board_1841_idx },

	{ }	/* terminate list */
};
static struct pci_driver adma_ata_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= adma_ata_pci_tbl,
	.probe		= adma_ata_init_one,
	.remove		= ata_pci_remove_one,
};
static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not yet supported */
}
static void adma_reset_engine(struct ata_port *ap)
{
	void __iomem *chan = ADMA_PORT_REGS(ap);

	/* reset ADMA to idle state */
	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
	writew(aPIOMD4, chan + ADMA_CONTROL);
}
static void adma_reinit_engine(struct ata_port *ap)
{
	struct adma_port_priv *pp = ap->private_data;
	void __iomem *chan = ADMA_PORT_REGS(ap);

	/* mask/clear ATA interrupts */
	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
	ata_sff_check_status(ap);

	/* reset the ADMA engine */
	adma_reset_engine(ap);

	/* set in-FIFO threshold to 0x100 */
	writew(0x100, chan + ADMA_FIFO_IN);

	/* set CPB pointer */
	writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);

	/* set out-FIFO threshold to 0x100 */
	writew(0x100, chan + ADMA_FIFO_OUT);

	/* set CPB count */
	writew(1, chan + ADMA_CPB_COUNT);

	/* read/discard ADMA status */
	readb(chan + ADMA_STATUS);
}
static inline void adma_enter_reg_mode(struct ata_port *ap)
{
	void __iomem *chan = ADMA_PORT_REGS(ap);

	writew(aPIOMD4, chan + ADMA_CONTROL);
	readb(chan + ADMA_STATUS);	/* flush */
}
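
/*
 * The channel is either in "register" (taskfile/PIO) mode or in ADMA packet
 * mode.  adma_enter_reg_mode() above drops back to register mode (aPIOMD4
 * only); adma_packet_start() below sets aGO to hand the prepared CPB to the
 * ADMA engine.  qc_prep and the interrupt handlers re-enter register mode
 * before touching the taskfile.
 */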
static void adma_freeze(struct ata_port *ap)
{
	void __iomem *chan = ADMA_PORT_REGS(ap);

	/* mask/clear ATA interrupts */
	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
	ata_sff_check_status(ap);

	/* reset ADMA to idle state */
	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
	writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL);
}
static void adma_thaw(struct ata_port *ap)
{
	adma_reinit_engine(ap);
}
static int adma_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct adma_port_priv *pp = ap->private_data;

	if (pp->state != adma_state_idle) /* healthy paranoia */
		pp->state = adma_state_mmio;
	adma_reinit_engine(ap);

	return ata_sff_prereset(link, deadline);
}
static int adma_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct adma_port_priv *pp = ap->private_data;
	u8 *buf = pp->pkt, *last_buf = NULL;
	int i = (2 + buf[3]) * 8;
	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr;
		u32 len;

		addr = (u32)sg_dma_address(sg);
		*(__le32 *)(buf + i) = cpu_to_le32(addr);
		i += 4;

		len = sg_dma_len(sg) >> 3;
		*(__le32 *)(buf + i) = cpu_to_le32(len);
		i += 4;

		last_buf = &buf[i];
		buf[i++] = pFLAGS;
		buf[i++] = qc->dev->dma_mode & 0xf;
		buf[i++] = 0;	/* pPKLW */
		buf[i++] = 0;	/* reserved */

		*(__le32 *)(buf + i) =
			(pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
		i += 4;
	}

	if (likely(last_buf))
		*last_buf |= pEND;

	return i;
}
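
/*
 * Each scatter/gather element above becomes one 16-byte PRD entry appended
 * after the CPB:
 *
 *	+0x0	32-bit DMA address of the data buffer
 *	+0x4	length in 8-byte units (sg_dma_len() >> 3)
 *	+0x8	flags (pORD, plus pDIRO for writes), DMA mode, pPKLW, reserved
 *	+0xc	32-bit DMA address of the next PRD entry
 *
 * The final entry is flagged with pEND via *last_buf after the loop.
 */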
static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct adma_port_priv *pp = qc->ap->private_data;
	u8 *buf = pp->pkt;
	u32 pkt_dma = (u32)pp->pkt_dma;
	int i = 0;

	adma_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA)
		return AC_ERR_OK;

	buf[i++] = 0;	/* Response flags */
	buf[i++] = 0;	/* reserved */
	buf[i++] = cVLD | cDAT | cIEN;
	i++;		/* cLEN, gets filled in below */

	*(__le32 *)(buf+i) = cpu_to_le32(pkt_dma);	/* cNCPB */
	i += 4;		/* cNCPB */
	i += 4;		/* cPRD, gets filled in below */

	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */

	/* ATA registers; must be a multiple of 4 */
	buf[i++] = qc->tf.device;
	buf[i++] = ADMA_REGS_DEVICE;
	if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
		buf[i++] = qc->tf.hob_nsect;
		buf[i++] = ADMA_REGS_SECTOR_COUNT;
		buf[i++] = qc->tf.hob_lbal;
		buf[i++] = ADMA_REGS_LBA_LOW;
		buf[i++] = qc->tf.hob_lbam;
		buf[i++] = ADMA_REGS_LBA_MID;
		buf[i++] = qc->tf.hob_lbah;
		buf[i++] = ADMA_REGS_LBA_HIGH;
	}
	buf[i++] = qc->tf.nsect;
	buf[i++] = ADMA_REGS_SECTOR_COUNT;
	buf[i++] = qc->tf.lbal;
	buf[i++] = ADMA_REGS_LBA_LOW;
	buf[i++] = qc->tf.lbam;
	buf[i++] = ADMA_REGS_LBA_MID;
	buf[i++] = qc->tf.lbah;
	buf[i++] = ADMA_REGS_LBA_HIGH;
	buf[i++] = 0;
	buf[i++] = ADMA_REGS_CONTROL;
	buf[i++] = rIGN;
	buf[i++] = 0;
	buf[i++] = qc->tf.command;
	buf[i++] = ADMA_REGS_COMMAND | rEND;

	buf[3] = (i >> 3) - 2;				/* cLEN */
	*(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i);	/* cPRD */

	i = adma_fill_sg(qc);
	wmb();	/* flush PRDs and pkt to memory */
	return AC_ERR_OK;
}
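
/*
 * CPB layout assembled by adma_qc_prep() above (byte offsets):
 *
 *	0	response flags, written back by the chip on completion
 *	1	reserved
 *	2	cVLD | cDAT | cIEN
 *	3	cLEN: (i >> 3) - 2
 *	4-7	cNCPB: DMA address of the packet itself (single-packet mode)
 *	8-11	cPRD: DMA address of the PRD table that follows the registers
 *	12-15	reserved
 *	16-	(value, register) byte pairs that load the ATA taskfile,
 *		ending with the command byte tagged ADMA_REGS_COMMAND | rEND
 */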
static inline void adma_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *chan = ADMA_PORT_REGS(ap);

	/* fire up the ADMA engine */
	writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
}
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct adma_port_priv *pp = qc->ap->private_data;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pp->state = adma_state_pkt;
		adma_packet_start(qc);
		return 0;

	default:
		break;
	}

	pp->state = adma_state_mmio;
	return ata_sff_qc_issue(qc);
}
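
/*
 * Only ATA_PROT_DMA goes through the ADMA packet engine; every other
 * protocol falls back to the standard SFF taskfile path.  Because
 * adma_port_info sets ATA_FLAG_PIO_POLLING, those commands are polled,
 * which is why the interrupt handlers skip qcs with ATA_TFLAG_POLLING.
 */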
static inline unsigned int adma_intr_pkt(struct ata_host *host)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		struct adma_port_priv *pp;
		struct ata_queued_cmd *qc;
		void __iomem *chan = ADMA_PORT_REGS(ap);
		u8 status = readb(chan + ADMA_STATUS);

		if (status == 0)
			continue;
		handled = 1;
		adma_enter_reg_mode(ap);
		pp = ap->private_data;
		if (!pp || pp->state != adma_state_pkt)
			continue;
		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			if (status & aPERR)
				qc->err_mask |= AC_ERR_HOST_BUS;
			else if ((status & (aPSD | aUIRQ)))
				qc->err_mask |= AC_ERR_OTHER;

			if (pp->pkt[0] & cATERR)
				qc->err_mask |= AC_ERR_DEV;
			else if (pp->pkt[0] != cDONE)
				qc->err_mask |= AC_ERR_OTHER;

			if (!qc->err_mask)
				ata_qc_complete(qc);
			else {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi,
					"ADMA-status 0x%02X", status);
				ata_ehi_push_desc(ehi,
					"pkt[0] 0x%02X", pp->pkt[0]);

				if (qc->err_mask == AC_ERR_DEV)
					ata_port_abort(ap);
				else
					ata_port_freeze(ap);
			}
		}
	}
	return handled;
}
static inline unsigned int adma_intr_mmio(struct ata_host *host)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		struct adma_port_priv *pp = ap->private_data;
		struct ata_queued_cmd *qc;

		if (!pp || pp->state != adma_state_mmio)
			continue;
		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {

			/* check main status, clearing INTRQ */
			u8 status = ata_sff_check_status(ap);
			if ((status & ATA_BUSY))
				continue;

			/* complete taskfile transaction */
			pp->state = adma_state_idle;
			qc->err_mask |= ac_err_mask(status);
			if (!qc->err_mask)
				ata_qc_complete(qc);
			else {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "status 0x%02X", status);

				if (qc->err_mask == AC_ERR_DEV)
					ata_port_abort(ap);
				else
					ata_port_freeze(ap);
			}
			handled = 1;
		}
	}
	return handled;
}
static irqreturn_t adma_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int handled = 0;

	spin_lock(&host->lock);
	handled = adma_intr_pkt(host) | adma_intr_mmio(host);
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
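
/*
 * The interrupt line is shared (IRQF_SHARED in adma_ata_init_one()), so the
 * handler simply polls both completion paths for every port under the host
 * lock: adma_intr_pkt() inspects the per-channel ADMA status registers and
 * adma_intr_mmio() the legacy ATA status for taskfile commands.
 */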
static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		=
	port->data_addr		= base + 0x000;
	port->error_addr	=
	port->feature_addr	= base + 0x004;
	port->nsect_addr	= base + 0x008;
	port->lbal_addr		= base + 0x00c;
	port->lbam_addr		= base + 0x010;
	port->lbah_addr		= base + 0x014;
	port->device_addr	= base + 0x018;
	port->status_addr	=
	port->command_addr	= base + 0x01c;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x038;
}
static int adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct adma_port_priv *pp;

	adma_enter_reg_mode(ap);
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
				      GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;
	if ((pp->pkt_dma & 7) != 0) {
		ata_port_err(ap, "bad alignment for pp->pkt_dma: %08x\n",
			     (u32)pp->pkt_dma);
		return -ENOMEM;
	}
	ap->private_data = pp;
	adma_reinit_engine(ap);
	return 0;
}

static void adma_port_stop(struct ata_port *ap)
{
	adma_reset_engine(ap);
}
static void adma_host_init(struct ata_host *host, unsigned int chip_id)
{
	unsigned int port_no;

	/* enable/lock aGO operation */
	writeb(7, host->iomap[ADMA_MMIO_BAR] + ADMA_MODE_LOCK);

	/* reset the ADMA logic */
	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
		adma_reset_engine(host->ports[port_no]);
}
static int adma_ata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int) ent->driver_data;
	const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, port_no;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
		return -ENODEV;

	rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	mmio_base = host->iomap[ADMA_MMIO_BAR];

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "32-bit DMA enable failed\n");
		return rc;
	}

	for (port_no = 0; port_no < ADMA_PORTS; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no);
		unsigned int offset = port_base - mmio_base;

		adma_ata_setup_port(&ap->ioaddr, port_base);

		ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port");
	}

	/* initialize adapter */
	adma_host_init(host, board_idx);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, adma_intr, IRQF_SHARED,
				 &adma_ata_sht);
}

module_pci_driver(adma_ata_pci_driver);
MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);