/*
 * Marvell 88SE64xx hardware specific
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
*/

#include "mv_sas.h"
#include "mv_64xx.h"
#include "mv_chips.h"
static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
{
	void __iomem *regs = mvi->regs;
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];

	reg = mr32(MVS_GBL_PORT_TYPE);
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	if (reg & MODE_SAS_SATA & (1 << i))
		phy->phy_type |= PORT_TYPE_SAS;
	else
		phy->phy_type |= PORT_TYPE_SATA;
}
static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_PCS);
	if (mvi->chip->n_phy <= MVS_SOC_PORTS)
		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
	else
		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
	mw32(MVS_PCS, tmp);
}
static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;

	mvs_phy_hacks(mvi);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		for (i = 0; i < MVS_SOC_PORTS; i++) {
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
			mvs_write_port_vsr_data(mvi, i, 0x2F0);
		}
	} else {
		/* disable auto port detection */
		mw32(MVS_GBL_PORT_TYPE, 0);
		for (i = 0; i < mvi->chip->n_phy; i++) {
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
			mvs_write_port_vsr_data(mvi, i, 0x90000000);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
			mvs_write_port_vsr_data(mvi, i, 0x50f2);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
			mvs_write_port_vsr_data(mvi, i, 0x0e);
		}
	}
}
static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 reg, tmp;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < MVS_SOC_PORTS)
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
		else
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
	} else
		reg = mr32(MVS_PHY_CTL);

	tmp = reg;
	if (phy_id < MVS_SOC_PORTS)
		tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
	else
		tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;

	/* pulse the link reset bit, then restore the original value */
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < MVS_SOC_PORTS) {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
		} else {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
		}
	} else {
		mw32(MVS_PHY_CTL, tmp);
		mdelay(10);
		mw32(MVS_PHY_CTL, reg);
	}
}
static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;

	tmp = mvs_read_port_irq_stat(mvi, phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, phy_id, tmp);
	tmp = mvs_read_phy_ctl(mvi, phy_id);
	if (hard == MVS_HARD_RESET)
		tmp |= PHY_RST_HARD;
	else if (hard == MVS_SOFT_RESET)
		tmp |= PHY_RST;
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	if (hard) {
		/* PHY_RST_HARD is self-clearing; busy-wait until the
		 * hardware finishes the reset and drops the bit */
		do {
			tmp = mvs_read_phy_ctl(mvi, phy_id);
		} while (tmp & PHY_RST_HARD);
	}
}
void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (clear_all) {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp) {
			printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_0, tmp);
		}
	} else {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp & (1 << (reg_set % 32))) {
			printk(KERN_DEBUG "register set 0x%x was stopped.\n",
			       reg_set);
			mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
		}
	}
}
static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	int i;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}
	}

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(MVS_GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(MVS_GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(MVS_GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
		return -EBUSY;
	}

	return 0;
}
static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		u32 offs;
		if (phy_id < MVS_SOC_PORTS)
			offs = PCR_PHY_CTL;
		else {
			offs = PCR_PHY_CTL2;
			phy_id -= MVS_SOC_PORTS;
		}
		pci_read_config_dword(mvi->pdev, offs, &tmp);
		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
		pci_write_config_dword(mvi->pdev, offs, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
		mw32(MVS_PHY_CTL, tmp);
	}
}
static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		u32 offs;
		if (phy_id < MVS_SOC_PORTS)
			offs = PCR_PHY_CTL;
		else {
			offs = PCR_PHY_CTL2;
			phy_id -= MVS_SOC_PORTS;
		}
		pci_read_config_dword(mvi->pdev, offs, &tmp);
		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
		pci_write_config_dword(mvi->pdev, offs, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
		mw32(MVS_PHY_CTL, tmp);
	}
}
static int __devinit mvs_64xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	if (mvi->pdev && mvi->pdev->revision == 0)
		mvi->flags |= MVF_PHY_PWR_FIX;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		mvs_show_pcie_usage(mvi);
		tmp = mvs_64xx_chip_reset(mvi);
		if (tmp)
			return tmp;
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		/* write to device control _AND_ device status register */
		pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
		tmp &= ~PRD_REQ_MASK;
		tmp |= PRD_REQ_SIZE;
		pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
		tmp &= ~PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
		tmp &= ~PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}
	/* reset control */
	mw32(MVS_PCS, 0);		/* MVS_PCS */

	mvs_64xx_phy_hacks(mvi);

	tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);

	/* enable auto port detection */
	mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
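
	/*
	 * Program the 64-bit ring base addresses.  Each base is written as
	 * a low/high register pair; the (x >> 16) >> 16 form is the usual
	 * kernel idiom for "upper 32 bits" that stays well-defined even
	 * when dma_addr_t is only 32 bits wide (a plain x >> 32 would be
	 * undefined behavior there).
	 */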
	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* set phy local SAS address */
		/* should set little endian SAS address to 64xx chip */
		mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
				cpu_to_be64(mvi->phy[i].dev_sas_addr));

		mvs_64xx_enable_xmt(mvi, i);

		mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
		msleep(500);
		mvs_64xx_detect_porttype(mvi, i);
	}
	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
			PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}
	/* little endian for open address and command table, etc. */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);
	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	tmp &= ~PCS_SELF_CLEAR;
	mw32(MVS_PCS, tmp);
	/*
	 * interrupt coalescing: the count field holds at most 0x1ff, while
	 * our maximum slot count is 0x200, which would wrap the field to 0,
	 * so clamp the count when necessary.
	 */
	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);
	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE);

	mw32(MVS_INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}
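
/*
 * Register window setup: per the mvs_ioremap() call below, the 64xx
 * driver maps PCI BAR 4 as the main register space (mvi->regs) and
 * BAR 2 as the extended space (mvi->regs_ex) used by the SPI flash
 * helpers further down.
 */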
static int mvs_64xx_ioremap(struct mvs_info *mvi)
{
	if (!mvs_ioremap(mvi, 4, 2))
		return 0;
	return -1;
}
static void mvs_64xx_iounmap(struct mvs_info *mvi)
{
	mvs_iounmap(mvi->regs);
	mvs_iounmap(mvi->regs_ex);
}
static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	mw32(MVS_GBL_CTL, tmp | INT_EN);
}
static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	mw32(MVS_GBL_CTL, tmp & ~INT_EN);
}
static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
{
	void __iomem *regs = mvi->regs;
	u32 stat;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		stat = mr32(MVS_GBL_INT_STAT);

		if (stat == 0 || stat == 0xffffffff)
			return 0;
	} else
		stat = 1;
	return stat;
}
static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
	void __iomem *regs = mvi->regs;

	/* clear CMD_CMPLT ASAP */
	mw32_f(MVS_INT_STAT, CINT_DONE);

	spin_lock(&mvi->lock);
	mvs_int_full(mvi);
	spin_unlock(&mvi->lock);

	return IRQ_HANDLED;
}
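
/*
 * Kick and then drain the per-slot "command active" bits.  Judging by
 * the accesses below, the controller keeps two banks of per-slot bits
 * in its command register space (at offsets 0x00 and 0x40); the
 * function sets the slot's bit in both banks and busy-waits until the
 * hardware clears them again.
 */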
static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;

	mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
	mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
}
static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
				u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (type == PORT_TYPE_SATA) {
		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
		mw32(MVS_INT_STAT_SRS_0, tmp);
	}
	mw32(MVS_INT_STAT, CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}
static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, offs;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
	if (*tfs < 16) {
		tmp = mr32(MVS_PCS);
		mw32(MVS_PCS, tmp & ~offs);
	} else {
		tmp = mr32(MVS_CTL);
		mw32(MVS_CTL, tmp & ~offs);
	}

	tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
	if (tmp)
		mw32(MVS_INT_STAT_SRS_0, tmp);

	*tfs = MVS_ID_NOT_MAPPED;
}
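
/*
 * SATA register set (SRS) allocation.  From the code above and below,
 * the enable bits for sets 0-15 live in MVS_PCS and those for sets 16
 * and up in MVS_CTL, with PCS_EN_SATA_REG_SHIFT locating the bitfield
 * within each register.  The allocator scans for a clear bit, claims
 * it, and acknowledges any stale SRS interrupt left over for that set.
 */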
static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;

	if (*tfs != MVS_ID_NOT_MAPPED)
		return 0;

	tmp = mr32(MVS_PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		if (i == 16)
			tmp = mr32(MVS_CTL);
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			*tfs = i;

			if (i < 16)
				mw32(MVS_PCS, tmp | offs);
			else
				mw32(MVS_CTL, tmp | offs);
			tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
			if (tmp)
				mw32(MVS_INT_STAT_SRS_0, tmp);
			return 0;
		}
	}
	return MVS_ID_NOT_MAPPED;
}
void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
	int i;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd = prd;

	for_each_sg(scatter, sg, nr, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}
}
static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
{
	u32 phy_st;

	mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
	phy_st = mvs_read_port_cfg_data(mvi, i);
	if (phy_st & PHY_OOB_DTCTD)
		return 1;
	return 0;
}
static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
				struct sas_identify_frame *id)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	sas_phy->linkrate =
		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;

	phy->minimum_linkrate =
		(phy->phy_status &
			PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
	phy->maximum_linkrate =
		(phy->phy_status &
			PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;

	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
	phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
	phy->att_dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
	phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
	phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
}
static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];

	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
	tmp = mvs_read_port_vsr_data(mvi, i);
	/* late-clock mode is used at every negotiated rate above 1.5 Gbps */
	if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
	     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
		SAS_LINK_RATE_1_5_GBPS)
		tmp &= ~PHY_MODE6_LATECLK;
	else
		tmp |= PHY_MODE6_LATECLK;
	mvs_write_port_vsr_data(mvi, i, tmp);
}
void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
			struct sas_phy_linkrates *rates)
{
	u32 lrmin = 0, lrmax = 0;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);
	lrmin = (rates->minimum_linkrate << 8);
	lrmax = (rates->maximum_linkrate << 12);

	if (lrmin) {
		tmp &= ~(0xf << 8);
		tmp |= lrmin;
	}
	if (lrmax) {
		tmp &= ~(0xf << 12);
		tmp |= lrmax;
	}
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
}
static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;

	tmp = mr32(MVS_PCS);
	mw32(MVS_PCS, tmp & 0xFFFF);
	tmp = mr32(MVS_CTL);
	mw32(MVS_CTL, tmp & 0xFFFF);
}
u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;

	return ior32(SPI_DATA_REG_64XX);
}
void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
	void __iomem *regs = mvi->regs_ex;

	iow32(SPI_DATA_REG_64XX, data);
}
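
/*
 * Build a vendor SPI command word.  Judging by the bit positions used
 * below: the opcode sits in bits 31:24, the transfer length starts at
 * bit 19, bit 23 flags a read, and when a flash address is supplied
 * (addr != MV_MAX_U32) bit 22 marks it valid, with the low 18 bits of
 * the word carrying the address itself.
 */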
int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
			u32      *dwCmd,
			u8       cmd,
			u8       read,
			u8       length,
			u32      addr
			)
{
	u32 dwTmp;

	dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
	if (read)
		dwTmp |= 1U << 23;

	if (addr != MV_MAX_U32) {
		dwTmp |= 1U << 22;
		dwTmp |= (addr & 0x0003FFFF);
	}

	*dwCmd = dwTmp;
	return 0;
}
int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
	void __iomem *regs = mvi->regs_ex;
	int retry;

	for (retry = 0; retry < 1; retry++) {
		iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
		iow32(SPI_CMD_REG_64XX, cmd);
		iow32(SPI_CTRL_REG_64XX,
			SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
	}

	return 0;
}
int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
	void __iomem *regs = mvi->regs_ex;
	u32 i, dwTmp;

	for (i = 0; i < timeout; i++) {
		dwTmp = ior32(SPI_CTRL_REG_64XX);
		if (!(dwTmp & SPI_CTRL_SPISTART))
			return 0;
		msleep(10);
	}

	return -1;
}
void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
				int buf_len, int from, void *prd)
{
	int i;
	struct mvs_prd *buf_prd = prd;
	dma_addr_t buf_dma = mvi->bulk_buffer_dma;

	buf_prd += from;
	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
		buf_prd->addr = cpu_to_le64(buf_dma);
		buf_prd->len = cpu_to_le32(buf_len);
		++buf_prd;
	}
}
static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
{
	void __iomem *regs = mvi->regs;
	u32 tmp = 0;

	/*
	 * the max count is 0x1ff, while our max slot is 0x200,
	 * so a full slot count would wrap the field to 0.
	 */
	if (time == 0) {
		mw32(MVS_INT_COAL, 0);
		mw32(MVS_INT_COAL_TMOUT, 0x10000);
	} else {
		if (MVS_CHIP_SLOT_SZ > 0x1ff)
			mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
		else
			mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

		tmp = 0x10000 | time;
		mw32(MVS_INT_COAL_TMOUT, tmp);
	}
}
const struct mvs_dispatch mvs_64xx_dispatch = {
	"mv64xx",
	mvs_64xx_init,
	NULL,
	mvs_64xx_ioremap,
	mvs_64xx_iounmap,
	mvs_64xx_isr,
	mvs_64xx_isr_status,
	mvs_64xx_interrupt_enable,
	mvs_64xx_interrupt_disable,
	mvs_read_phy_ctl,
	mvs_write_phy_ctl,
	mvs_read_port_cfg_data,
	mvs_write_port_cfg_data,
	mvs_write_port_cfg_addr,
	mvs_read_port_vsr_data,
	mvs_write_port_vsr_data,
	mvs_write_port_vsr_addr,
	mvs_read_port_irq_stat,
	mvs_write_port_irq_stat,
	mvs_read_port_irq_mask,
	mvs_write_port_irq_mask,
	mvs_64xx_command_active,
	mvs_64xx_clear_srs_irq,
	mvs_64xx_issue_stop,
	mvs_start_delivery,
	mvs_rx_update,
	mvs_int_full,
	mvs_64xx_assign_reg_set,
	mvs_64xx_free_reg_set,
	mvs_get_prd_size,
	mvs_get_prd_count,
	mvs_64xx_make_prd,
	mvs_64xx_detect_porttype,
	mvs_64xx_oob_done,
	mvs_64xx_fix_phy_info,
	mvs_64xx_phy_work_around,
	mvs_64xx_phy_set_link_rate,
	mvs_hw_max_link_rate,
	mvs_64xx_phy_disable,
	mvs_64xx_phy_enable,
	mvs_64xx_phy_reset,
	mvs_64xx_stp_reset,
	mvs_64xx_clear_active_cmds,
	mvs_64xx_spi_read_data,
	mvs_64xx_spi_write_data,
	mvs_64xx_spi_buildcmd,
	mvs_64xx_spi_issuecmd,
	mvs_64xx_spi_waitdataready,
	mvs_64xx_fix_dma,
	mvs_64xx_tune_interrupt,
	NULL,
};