// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/ata/sata_dwc_460ex.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *          Copyright 2006 Applied Micro Circuits Corporation
 *          COPYRIGHT (C) 2005  SYNOPSYS, INC.  ALL RIGHTS RESERVED
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include <trace/events/libata.h>

#include "libata.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
/* These two are defined in "libata.h" */
#undef	DRV_NAME
#undef	DRV_VERSION

#define DRV_NAME	"sata-dwc"
#define DRV_VERSION	"1.3"
#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
#define sata_dwc_readl(a)	readl_relaxed(a)

#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */
enum {
	SATA_DWC_MAX_PORTS	= 1,

	SATA_DWC_SCR_OFFSET	= 0x24,
	SATA_DWC_REG_OFFSET	= 0x64,
};
/* DWC SATA Registers */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA Xfr count */
	u32 dmacr;		/* DMA Control */
	u32 dbtsr;		/* DMA Burst Transac size */
	u32 intpr;		/* Interrupt Pending */
	u32 intmr;		/* Interrupt Mask */
	u32 errmr;		/* Error Mask */
	u32 llcr;		/* Link Layer Control */
	u32 phycr;		/* PHY Control */
	u32 physr;		/* PHY Status */
	u32 rxbistpd;		/* Recvd BIST pattern def register */
	u32 rxbistpd1;		/* Recvd BIST data dword1 */
	u32 rxbistpd2;		/* Recvd BIST pattern data dword2 */
	u32 txbistpd;		/* Trans BIST pattern def register */
	u32 txbistpd1;		/* Trans BIST data dword1 */
	u32 txbistpd2;		/* Trans BIST data dword2 */
	u32 bistcr;		/* BIST Control Register */
	u32 bistfctr;		/* BIST FIS Count Register */
	u32 bistsr;		/* BIST Status Register */
	u32 bistdecr;		/* BIST Dword Error count register */
	u32 res[15];		/* Reserved locations */
	u32 testr;		/* Test Register */
	u32 versionr;		/* Version Register */
	u32 idr;		/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
};
enum {
	SCR_SCONTROL_DET_ENABLE		= 0x00000001,
	SCR_SSTATUS_DET_PRESENT		= 0x00000001,
	SCR_SERROR_DIAG_X		= 0x04000000,
/* DWC SATA Register Operations */
	SATA_DWC_TXFIFO_DEPTH		= 0x01FF,
	SATA_DWC_RXFIFO_DEPTH		= 0x01FF,
	SATA_DWC_DMACR_TMOD_TXCHEN	= 0x00000004,
	SATA_DWC_DMACR_TXCHEN		= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN		= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR	= SATA_DWC_DMACR_TMOD_TXCHEN,
	SATA_DWC_INTPR_DMAT		= 0x00000001,
	SATA_DWC_INTPR_NEWFP		= 0x00000002,
	SATA_DWC_INTPR_PMABRT		= 0x00000004,
	SATA_DWC_INTPR_ERR		= 0x00000008,
	SATA_DWC_INTPR_NEWBIST		= 0x00000010,
	SATA_DWC_INTPR_IPF		= 0x10000000,
	SATA_DWC_INTMR_DMATM		= 0x00000001,
	SATA_DWC_INTMR_NEWFPM		= 0x00000002,
	SATA_DWC_INTMR_PMABRTM		= 0x00000004,
	SATA_DWC_INTMR_ERRM		= 0x00000008,
	SATA_DWC_INTMR_NEWBISTM		= 0x00000010,
	SATA_DWC_LLCR_SCRAMEN		= 0x00000001,
	SATA_DWC_LLCR_DESCRAMEN		= 0x00000002,
	SATA_DWC_LLCR_RPDEN		= 0x00000004,
/* This is all error bits, zero's are reserved fields. */
	SATA_DWC_SERROR_ERR_BITS	= 0x0FFF0F03
};
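/*
 * Note on the helpers below: DBTSR burst sizes are programmed in units of
 * 32-bit words, hence the "(size)/4" conversions (AHB_DMA_BRST_DFLT is in
 * bytes).  The MRD (read) field is assumed here to occupy the upper half of
 * DBTSR, which is why SATA_DWC_DBTSR_MRD() shifts its value left by 16.
 */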
#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
						 << 16)
struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
	u32			sactive_issued;
	u32			sactive_queued;
	struct phy		*phy;
	phys_addr_t		dmadr;
#ifdef CONFIG_SATA_DWC_OLD_DMA
	struct dw_dma_chip	*dma;
#endif
};
/*
 * Allow one extra special slot for commands and DMA management
 * to account for libata internal commands.
 */
#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)
struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];

	struct dma_chan		*chan;
	struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
	u32			dma_interrupt_count;
};
/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)
enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
#ifdef CONFIG_SATA_DWC_OLD_DMA

#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

static struct dw_dma_slave sata_dwc_dma_dws = {
	/* dw_dmac slave configuration for the AHB DMA channel used by the SATA core */
};
static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;

	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}
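/*
 * Legacy channel setup: this path is only taken when the device tree node
 * has no "dmas" property (see sata_dwc_dma_get_channel() below), so the
 * channel is found by filtering all dw_dmac channels against the SATA
 * device rather than by a named DT binding.
 */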
static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
	struct device *dev = hsdev->dev;
	dma_cap_mask_t mask;

	dws->dma_dev = dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channel */
	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
	if (!hsdevp->chan) {
		dev_err(dev, "%s: dma channel unavailable\n", __func__);
		return -EAGAIN;
	}

	return 0;
}
static int sata_dwc_dma_init_old(struct platform_device *pdev,
				 struct sata_dwc_device *hsdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	hsdev->dma->dev = dev;
	hsdev->dma->id = pdev->id;

	/* Get SATA DMA interrupt number */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (!hsdev->dma->irq) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

	/* Get physical SATA DMA register base address */
	hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(hsdev->dma->regs))
		return PTR_ERR(hsdev->dma->regs);

	/* Initialize AHB DMAC */
	return dw_dma_probe(hsdev->dma);
}
static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
{
	dw_dma_remove(hsdev->dma);
}

#endif /* CONFIG_SATA_DWC_OLD_DMA */
static const char *get_prot_descript(u8 protocol)
{
	switch (protocol) {
	case ATA_PROT_NODATA:
		return "ATA no data";
	case ATA_PROT_NCQ_NODATA:
		return "ATA NCQ no data";
	case ATAPI_PROT_NODATA:
		return "ATAPI no data";
	default:
		return "unknown";
	}
}
static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = (struct ata_host *)hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts.  Only
	 * complete the command after both interrupts have been
	 * seen. (See sata_dwc_isr())
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap);

	spin_unlock_irqrestore(&host->lock, flags);
}
static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct dma_slave_config sconf;
	struct dma_async_tx_descriptor *desc;

	if (qc->dma_dir == DMA_DEV_TO_MEM) {
		sconf.src_addr = hsdev->dmadr;
		sconf.device_fc = false;
	} else {	/* DMA_MEM_TO_DEV */
		sconf.dst_addr = hsdev->dmadr;
		sconf.device_fc = false;
	}

	sconf.direction = qc->dma_dir;
	sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dmaengine_slave_config(hsdevp->chan, &sconf);

	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
				       qc->dma_dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return NULL;

	desc->callback = dma_dwc_xfer_done;
	desc->callback_param = hsdev;

	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
		qc->sg, qc->n_elem, &hsdev->dmadr);

	return desc;
}
static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}

	*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, *val);

	return 0;
}
static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, val);
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}
	sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);

	return 0;
}
static void clear_serror(struct ata_port *ap)
{
	u32 val;

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
	sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}
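/*
 * Note: clear_interrupt_bit() below acknowledges interrupts by writing the
 * current contents of INTPR back to itself.  Assuming the pending register
 * is write-one-to-clear (as the usage here suggests), this clears every bit
 * that is pending at that moment, not just the one named by 'bit'.
 */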
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}
static u32 qcmd_tag_to_mask(u8 tag)
{
	return 0x00000001 << (tag & 0x1f);
}
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now.  TODO check for exact error */

	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}
/*
 * Function : sata_dwc_isr
 * arguments : irq, void *dev_instance, struct pt_regs *regs
 * Return value : irqreturn_t - status of IRQ
 * This Interrupt handler called via port ops registered function.
 * .irq_handler = sata_dwc_isr
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;
	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}
	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}
		/*
		 * Start FP DMA for NCQ command.  At this point the tag is the
		 * active tag.  It is the tag that matches the command about to
		 * be completed.
		 */
		trace_ata_bmdma_start(ap, &qc->tf, tag);
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
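	/*
	 * sactive_issued tracks the tags this driver has issued; SActive
	 * holds the tags the device still considers outstanding.  OR-ing
	 * them and XOR-ing SActive back out leaves exactly the tags that
	 * were issued but are no longer active, i.e. the completed ones.
	 */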
	/* If no sactive issued and tag_mask is zero then this is not NCQ */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts.  The DMAC
			 * transfer complete interrupt and the SATA controller
			 * operation done interrupt.  The command should be
			 * completed only after both interrupts are seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
					SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}
	/*
	 * This is an NCQ command.  At this point we need to figure out for which
	 * tags we have gotten a completion interrupt.  One interrupt may serve
	 * as completion for more than one operation when commands are queued
	 * (NCQ).  We need to process each completed command.
	 */

	/* process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
	tag = 0;
	while (tag_mask) {
		/* Advance 'tag' to the lowest tag still set in tag_mask */
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask >>= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
					SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					 __func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			 ap->print_id);
	} /* while tag_mask */
	/*
	 * Check to see if any commands completed while we were processing our
	 * initial set of completed commands (read status clears interrupts,
	 * so we might miss a completed command interrupt if one came in while
	 * we were processing --we read status as part of processing a completed
	 * command).
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else {
		/*
		 * This should not happen, it indicates the driver is out of
		 * sync.  If it does happen, clear dmacr anyway.
		 */
		dev_err(hsdev->dev,
			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
			__func__, tag, hsdevp->dma_pending[tag], dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}
static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc");
		return;
	}

	if (ata_is_dma(qc->tf.protocol)) {
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
			return;
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc);
	}
}
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->hw_tag;
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	hsdev->sactive_queued = 0;

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	hsdev->sactive_queued = hsdev->sactive_queued & mask;
	hsdev->sactive_issued = hsdev->sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}
static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt mask register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
			SATA_DWC_INTMR_ERRM |
			SATA_DWC_INTMR_NEWFPM |
			SATA_DWC_INTMR_PMABRTM |
			SATA_DWC_INTMR_DMATM);
	/*
	 * Unmask the error bits that should trigger an error interrupt by
	 * setting the error mask register.
	 */
	sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
		__func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
		sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}
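/*
 * The DWC core appears to expose the shadow taskfile registers as 32-bit
 * locations spaced 4 bytes apart, so the libata ioaddr entries below step in
 * units of 0x04 instead of the classic 1-byte SFF layout; data/error and
 * command/status still share offsets as in a conventional taskfile.
 */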
static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base + 0x00;
	port->data_addr		= base + 0x00;

	port->error_addr	= base + 0x04;
	port->feature_addr	= base + 0x04;

	port->nsect_addr	= base + 0x08;

	port->lbal_addr		= base + 0x0c;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;

	port->device_addr	= base + 0x18;
	port->command_addr	= base + 0x1c;
	port->status_addr	= base + 0x1c;

	port->altstatus_addr	= base + 0x20;
	port->ctl_addr		= base + 0x20;
}
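/*
 * Channel lookup for the DMA engine.  When the DT node carries a "dmas"
 * property, dma_request_chan() resolves the channel by the "sata-dma" entry
 * in "dma-names"; otherwise the legacy dw_dmac filter path above is used.
 */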
static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct device *dev = hsdev->dev;

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_property_present(dev->of_node, "dmas"))
		return sata_dwc_dma_get_channel_old(hsdevp);
#endif

	hsdevp->chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(hsdevp->chan)) {
		dev_err(dev, "failed to allocate dma channel: %ld\n",
			PTR_ERR(hsdevp->chan));
		return PTR_ERR(hsdevp->chan);
	}

	return 0;
}
/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_ioports *port
 * Return value : returns 0 if success, error code otherwise
 * This function allocates the scatter gather LLI table for AHB DMA
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
	int err = 0;
	struct sata_dwc_device *hsdev;
	struct sata_dwc_device_port *hsdevp = NULL;
	struct device *pdev;
	int i;

	hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

	hsdev->host = ap->host;
	pdev = ap->host->dev;
	if (!pdev) {
		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
		err = -ENODEV;
		goto CLEANUP;
	}

	/* Allocate Port Struct */
	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
	if (!hsdevp) {
		err = -ENOMEM;
		goto CLEANUP;
	}
	hsdevp->hsdev = hsdev;

	err = sata_dwc_dma_get_channel(hsdevp);
	if (err)
		goto CLEANUP_ALLOC;

	err = phy_power_on(hsdev->phy);
	if (err)
		goto CLEANUP_ALLOC;

	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
	ap->bmdma_prd_dma = 0;

	if (ap->port_no == 0) {
		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);

		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
				(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
				 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
	}

	/* Clear any error bits before libata starts issuing commands */
	clear_serror(ap);
	ap->private_data = hsdevp;
	dev_dbg(ap->dev, "%s: done\n", __func__);
	return 0;

CLEANUP_ALLOC:
	kfree(hsdevp);
CLEANUP:
	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
	return err;
}
static void sata_dwc_port_stop(struct ata_port *ap)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

	dmaengine_terminate_sync(hsdevp->chan);
	dma_release_channel(hsdevp->chan);
	phy_power_off(hsdev->phy);

	kfree(hsdevp);
	ap->private_data = NULL;
}
/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag ids and calls
 * ata_exec_command in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdevp->cmd_issued[tag] = cmd_issued;

	/*
	 * Clear SError before executing a new command.
	 * sata_dwc_scr_write and read can not be used here. Clearing the PM
	 * managed SError register for the disk needs to be done before the
	 * task file is loaded.
	 */
	clear_serror(ap);
	ata_sff_exec_command(ap, tf);
}
static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}
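/*
 * For non-NCQ commands only one request can be outstanding, so the
 * bmdma_setup/bmdma_start/qc_issue wrappers below collapse the hardware tag
 * to 0 and use slot 0 of the per-tag bookkeeping arrays for them.
 */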
static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_setup_by_tag(qc, tag);
}
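/*
 * Starting a transfer is a two step operation: the SATA-side DMA control
 * register is programmed to enable the TX or RX channel, and the dmaengine
 * descriptor prepared in dma_dwc_xfer_setup() is then submitted and issued
 * on the AHB DMA channel.
 */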
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	int start_dma;
	u32 reg;
	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
	int dir = qc->dma_dir;

	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
		start_dma = 1;
		if (dir == DMA_TO_DEVICE)
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
		else
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
	} else {
		dev_err(ap->dev,
			"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
			__func__, hsdevp->cmd_issued[tag], tag);
		start_dma = 0;
	}

	if (start_dma) {
		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
		if (reg & SATA_DWC_SERROR_ERR_BITS) {
			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
				__func__, reg);
		}

		if (dir == DMA_TO_DEVICE)
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_TXCHEN);
		else
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_RXCHEN);

		/* Enable AHB DMA transfer on the specified channel */
		dmaengine_submit(desc);
		dma_async_issue_pending(hsdevp->chan);
	}
}
static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_start_by_tag(qc, tag);
}
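/*
 * Issue path: for NCQ commands the corresponding SActive bit is set and the
 * taskfile is loaded and executed directly, so completions are matched by
 * tag in the interrupt handler; everything else is handed to the generic
 * ata_bmdma_qc_issue() helper.
 */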
static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
	u32 sactive;
	u8 tag = qc->hw_tag;
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	if (ata_is_dma(qc->tf.protocol)) {
		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
		if (!hsdevp->desc[tag])
			return AC_ERR_SYSTEM;
	} else {
		hsdevp->desc[tag] = NULL;
	}

	if (ata_is_ncq(qc->tf.protocol)) {
		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
		sactive |= (0x00000001 << tag);
		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);
		trace_ata_exec_command(ap, &qc->tf, tag);
		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
					     SATA_DWC_CMD_ISSUED_PEND);
	} else {
		return ata_bmdma_qc_issue(qc);
	}

	return 0;
}
static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}
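/*
 * After the generic SFF hardreset, interrupts are re-enabled and the DMA
 * control and burst size registers are reprogrammed, presumably because a
 * controller reset returns them to their power-on values.
 */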
static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reconfigure the DMA control register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
			SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Reconfigure the DMA Burst Transaction Size register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}
static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
	/* SATA DWC is master only */
}
/*
 * scsi mid-layer and libata interface structures
 */
static const struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	/*
	 * test-only: Currently this driver doesn't handle NCQ
	 * correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
	 */
	.sg_tablesize		= LIBATA_MAX_PRD,
	/* .can_queue		= ATA_MAX_QUEUE, */
	/*
	 * Make sure a LLI block is not created that will span 8K max FIS
	 * boundary. If the block spans such a FIS boundary, there is a chance
	 * that a DMA burst will cross that boundary -- this results in an
	 * error in the host controller.
	 */
	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
};
static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.sff_dev_select		= sata_dwc_dev_select,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};
static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &sata_dwc_ops,
	},
};
static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = dev->of_node;
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	void __iomem *base;
	int err = 0;
	int irq;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct resource *res;

	/* Allocate DWC SATA device */
	host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
	hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
	if (!host || !hsdev)
		return -ENOMEM;

	host->private_data = hsdev;

	/* Ioremap SATA registers */
	base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dev_dbg(dev, "ioremap done for SATA register address\n");

	/* Synopsys DWC SATA specific Registers */
	hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
	hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET +
		offsetof(struct sata_dwc_regs, dmadr);

	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, base);

	/* Read the ID and Version Registers */
	idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
	versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
	dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);

	/* Save dev for later use in dev_xxx() routines */
	hsdev->dev = dev;

	/* Enable SATA Interrupts */
	sata_dwc_enable_interrupts(hsdev);

	/* Get SATA interrupt number */
	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_property_present(np, "dmas")) {
		err = sata_dwc_dma_init_old(ofdev, hsdev);
		if (err)
			return err;
	}
#endif

	hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(hsdev->phy))
		return PTR_ERR(hsdev->phy);

	err = phy_init(hsdev->phy);
	if (err)
		goto error_out;

	/*
	 * Now, register with libATA core, this will also initiate the
	 * device discovery process, invoking our port_start() handler &
	 * error_handler() to execute a dummy Softreset EH session
	 */
	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
	if (err)
		dev_err(dev, "failed to activate host");

	return 0;

error_out:
	phy_exit(hsdev->phy);
	return err;
}
static void sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
	/* Free SATA DMA resources */
	sata_dwc_dma_exit_old(hsdev);
#endif

	dev_dbg(dev, "done\n");
}
static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);
static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove_new = sata_dwc_remove,
};
module_platform_driver(sata_dwc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);