// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic PXA PATA driver
 *
 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <scsi/scsi_host.h>

#include <linux/platform_data/ata-pxa.h>

#define DRV_NAME	"pata_pxa"
#define DRV_VERSION	"0.1"

struct pata_pxa_data {
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	struct completion	dma_done;
};

/*
 * DMA interrupt handler.
 */
static void pxa_ata_dma_irq(void *d)
{
	struct pata_pxa_data *pd = d;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status == DMA_ERROR || status == DMA_COMPLETE)
		complete(&pd->dma_done);
}

/*
 * Prepare taskfile for submission.
 */
static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	struct dma_async_tx_descriptor *tx;
	enum dma_transfer_direction dir;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
		return AC_ERR_OK;
	}
	tx->callback = pxa_ata_dma_irq;
	tx->callback_param = pd;
	pd->dma_cookie = dmaengine_submit(tx);

	return AC_ERR_OK;
}

/*
 * Configure the DMA controller, load the DMA descriptors, but don't start the
 * DMA controller yet. Only issue the ATA command.
 */
static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}

/*
 * Execute the DMA transfer.
 */
static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;

	init_completion(&pd->dma_done);
	dma_async_issue_pending(pd->dma_chan);
}

/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	/* wait_for_completion_timeout() returns 0 on timeout */
	if (status != DMA_ERROR && status != DMA_COMPLETE &&
	    !wait_for_completion_timeout(&pd->dma_done, HZ))
		ata_dev_err(qc->dev, "Timeout waiting for DMA completion!\n");

	dmaengine_terminate_all(pd->dma_chan);
}

/*
 * Read DMA status. The bmdma_stop() will take care of properly finishing the
 * DMA transfer so we always have DMA-complete interrupt here.
 */
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
	struct pata_pxa_data *pd = ap->private_data;
	unsigned char ret = ATA_DMA_INTR;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
	if (status != DMA_COMPLETE)
		ret |= ATA_DMA_ERR;

	return ret;
}

/*
 * No IRQ register present so we do nothing.
 */
static void pxa_irq_clear(struct ata_port *ap)
{
}

/*
 * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
 * unclear why ATAPI has DMA issues.
 */
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return -EOPNOTSUPP;
}

static struct scsi_host_template pxa_ata_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations pxa_ata_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= ata_cable_40wire,

	.bmdma_setup		= pxa_bmdma_setup,
	.bmdma_start		= pxa_bmdma_start,
	.bmdma_stop		= pxa_bmdma_stop,
	.bmdma_status		= pxa_bmdma_status,

	.check_atapi_dma	= pxa_check_atapi_dma,

	.sff_irq_clear		= pxa_irq_clear,

	.qc_prep		= pxa_qc_prep,
};
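
/*
 * Everything not set above (taskfile handling, PIO data transfers, error
 * recovery, ...) is inherited unchanged from ata_bmdma_port_ops; only the
 * BMDMA hooks and qc_prep are overridden so that data transfers run through
 * the dmaengine framework, and sff_irq_clear is stubbed out because there is
 * no interrupt status register to clear.
 */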

static int pxa_ata_probe(struct platform_device *pdev)
{
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_pxa_data *data;
	struct resource *cmd_res;
	struct resource *ctl_res;
	struct resource *dma_res;
	struct resource *irq_res;
	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct dma_slave_config config;
	int ret = 0;

	/*
	 * Resource validation: four resources are needed:
	 *  - CMD port base address
	 *  - CTL port base address
	 *  - DMA port base address
	 *  - IRQ pin
	 */
	if (pdev->num_resources != 4) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * CMD port base address
	 */
	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(cmd_res == NULL))
		return -EINVAL;

	/*
	 * CTL port base address
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (unlikely(ctl_res == NULL))
		return -EINVAL;

	/*
	 * DMA port base address
	 */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (unlikely(dma_res == NULL))
		return -EINVAL;

	/*
	 * IRQ pin
	 */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(irq_res == NULL))
		return -EINVAL;

	/*
	 * Allocate the host
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	ap = host->ports[0];
	ap->ops = &pxa_ata_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->mwdma_mask = ATA_MWDMA2;

	ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
						resource_size(cmd_res));
	ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
						resource_size(ctl_res));
	ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
						resource_size(dma_res));

	/*
	 * Adjust register offsets
	 */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
	ap->ioaddr.data_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_DATA << pdata->reg_shift);
	ap->ioaddr.error_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_ERR << pdata->reg_shift);
	ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_FEATURE << pdata->reg_shift);
	ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_NSECT << pdata->reg_shift);
	ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_LBAL << pdata->reg_shift);
	ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_LBAM << pdata->reg_shift);
	ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_LBAH << pdata->reg_shift);
	ap->ioaddr.device_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_DEVICE << pdata->reg_shift);
	ap->ioaddr.status_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_STATUS << pdata->reg_shift);
	ap->ioaddr.command_addr = ap->ioaddr.cmd_addr +
					(ATA_REG_CMD << pdata->reg_shift);

	/*
	 * Allocate and load driver's internal data structure
	 */
	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
								GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ap->private_data = data;

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = dma_res->start;
	config.dst_addr = dma_res->start;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	/*
	 * Request the DMA channel
	 */
	data->dma_chan =
		dma_request_slave_channel(&pdev->dev, "data");
	if (!data->dma_chan)
		return -EBUSY;

	ret = dmaengine_slave_config(data->dma_chan, &config);
	if (ret < 0) {
		dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
		return ret;
	}

	/*
	 * Activate the ATA host
	 */
	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
				pdata->irq_flags, &pxa_ata_sht);
	if (ret)
		dma_release_channel(data->dma_chan);

	return ret;
}

static int pxa_ata_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct pata_pxa_data *data = host->ports[0]->private_data;

	dma_release_channel(data->dma_chan);

	ata_host_detach(host);

	return 0;
}

static struct platform_driver pxa_ata_driver = {
	.probe		= pxa_ata_probe,
	.remove		= pxa_ata_remove,
	.driver		= {
		.name	= DRV_NAME,
	},
};

module_platform_driver(pxa_ata_driver);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);