// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell 88SE64xx hardware specific
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */
14 static void mvs_64xx_detect_porttype(struct mvs_info
*mvi
, int i
)
16 void __iomem
*regs
= mvi
->regs
;
18 struct mvs_phy
*phy
= &mvi
->phy
[i
];
20 reg
= mr32(MVS_GBL_PORT_TYPE
);
21 phy
->phy_type
&= ~(PORT_TYPE_SAS
| PORT_TYPE_SATA
);
22 if (reg
& MODE_SAS_SATA
& (1 << i
))
23 phy
->phy_type
|= PORT_TYPE_SAS
;
25 phy
->phy_type
|= PORT_TYPE_SATA
;
28 static void mvs_64xx_enable_xmt(struct mvs_info
*mvi
, int phy_id
)
30 void __iomem
*regs
= mvi
->regs
;
34 if (mvi
->chip
->n_phy
<= MVS_SOC_PORTS
)
35 tmp
|= 1 << (phy_id
+ PCS_EN_PORT_XMT_SHIFT
);
37 tmp
|= 1 << (phy_id
+ PCS_EN_PORT_XMT_SHIFT2
);
41 static void mvs_64xx_phy_hacks(struct mvs_info
*mvi
)
43 void __iomem
*regs
= mvi
->regs
;
48 if (!(mvi
->flags
& MVF_FLAG_SOC
)) {
49 for (i
= 0; i
< MVS_SOC_PORTS
; i
++) {
50 mvs_write_port_vsr_addr(mvi
, i
, VSR_PHY_MODE8
);
51 mvs_write_port_vsr_data(mvi
, i
, 0x2F0);
54 /* disable auto port detection */
55 mw32(MVS_GBL_PORT_TYPE
, 0);
56 for (i
= 0; i
< mvi
->chip
->n_phy
; i
++) {
57 mvs_write_port_vsr_addr(mvi
, i
, VSR_PHY_MODE7
);
58 mvs_write_port_vsr_data(mvi
, i
, 0x90000000);
59 mvs_write_port_vsr_addr(mvi
, i
, VSR_PHY_MODE9
);
60 mvs_write_port_vsr_data(mvi
, i
, 0x50f2);
61 mvs_write_port_vsr_addr(mvi
, i
, VSR_PHY_MODE11
);
62 mvs_write_port_vsr_data(mvi
, i
, 0x0e);
67 static void mvs_64xx_stp_reset(struct mvs_info
*mvi
, u32 phy_id
)
69 void __iomem
*regs
= mvi
->regs
;
72 if (!(mvi
->flags
& MVF_FLAG_SOC
)) {
73 if (phy_id
< MVS_SOC_PORTS
)
74 pci_read_config_dword(mvi
->pdev
, PCR_PHY_CTL
, ®
);
76 pci_read_config_dword(mvi
->pdev
, PCR_PHY_CTL2
, ®
);
79 reg
= mr32(MVS_PHY_CTL
);
82 if (phy_id
< MVS_SOC_PORTS
)
83 tmp
|= (1U << phy_id
) << PCTL_LINK_OFFS
;
85 tmp
|= (1U << (phy_id
- MVS_SOC_PORTS
)) << PCTL_LINK_OFFS
;
87 if (!(mvi
->flags
& MVF_FLAG_SOC
)) {
88 if (phy_id
< MVS_SOC_PORTS
) {
89 pci_write_config_dword(mvi
->pdev
, PCR_PHY_CTL
, tmp
);
91 pci_write_config_dword(mvi
->pdev
, PCR_PHY_CTL
, reg
);
93 pci_write_config_dword(mvi
->pdev
, PCR_PHY_CTL2
, tmp
);
95 pci_write_config_dword(mvi
->pdev
, PCR_PHY_CTL2
, reg
);
98 mw32(MVS_PHY_CTL
, tmp
);
100 mw32(MVS_PHY_CTL
, reg
);
104 static void mvs_64xx_phy_reset(struct mvs_info
*mvi
, u32 phy_id
, int hard
)
107 tmp
= mvs_read_port_irq_stat(mvi
, phy_id
);
108 tmp
&= ~PHYEV_RDY_CH
;
109 mvs_write_port_irq_stat(mvi
, phy_id
, tmp
);
110 tmp
= mvs_read_phy_ctl(mvi
, phy_id
);
111 if (hard
== MVS_HARD_RESET
)
113 else if (hard
== MVS_SOFT_RESET
)
115 mvs_write_phy_ctl(mvi
, phy_id
, tmp
);
118 tmp
= mvs_read_phy_ctl(mvi
, phy_id
);
119 } while (tmp
& PHY_RST_HARD
);
124 mvs_64xx_clear_srs_irq(struct mvs_info
*mvi
, u8 reg_set
, u8 clear_all
)
126 void __iomem
*regs
= mvi
->regs
;
129 tmp
= mr32(MVS_INT_STAT_SRS_0
);
131 printk(KERN_DEBUG
"check SRS 0 %08X.\n", tmp
);
132 mw32(MVS_INT_STAT_SRS_0
, tmp
);
135 tmp
= mr32(MVS_INT_STAT_SRS_0
);
136 if (tmp
& (1 << (reg_set
% 32))) {
137 printk(KERN_DEBUG
"register set 0x%x was stopped.\n",
139 mw32(MVS_INT_STAT_SRS_0
, 1 << (reg_set
% 32));
144 static int mvs_64xx_chip_reset(struct mvs_info
*mvi
)
146 void __iomem
*regs
= mvi
->regs
;
150 /* make sure interrupts are masked immediately (paranoia) */
151 mw32(MVS_GBL_CTL
, 0);
152 tmp
= mr32(MVS_GBL_CTL
);
154 /* Reset Controller */
155 if (!(tmp
& HBA_RST
)) {
156 if (mvi
->flags
& MVF_PHY_PWR_FIX
) {
157 pci_read_config_dword(mvi
->pdev
, PCR_PHY_CTL
, &tmp
);
158 tmp
&= ~PCTL_PWR_OFF
;
159 tmp
|= PCTL_PHY_DSBL
;
160 pci_write_config_dword(mvi
->pdev
, PCR_PHY_CTL
, tmp
);
162 pci_read_config_dword(mvi
->pdev
, PCR_PHY_CTL2
, &tmp
);
163 tmp
&= ~PCTL_PWR_OFF
;
164 tmp
|= PCTL_PHY_DSBL
;
165 pci_write_config_dword(mvi
->pdev
, PCR_PHY_CTL2
, tmp
);
169 /* make sure interrupts are masked immediately (paranoia) */
170 mw32(MVS_GBL_CTL
, 0);
171 tmp
= mr32(MVS_GBL_CTL
);
173 /* Reset Controller */
174 if (!(tmp
& HBA_RST
)) {
175 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
176 mw32_f(MVS_GBL_CTL
, HBA_RST
);
179 /* wait for reset to finish; timeout is just a guess */
184 if (!(mr32(MVS_GBL_CTL
) & HBA_RST
))
187 if (mr32(MVS_GBL_CTL
) & HBA_RST
) {
188 dev_printk(KERN_ERR
, mvi
->dev
, "HBA reset failed\n");
194 static void mvs_64xx_phy_disable(struct mvs_info
*mvi
, u32 phy_id
)
196 void __iomem
*regs
= mvi
->regs
;
198 if (!(mvi
->flags
& MVF_FLAG_SOC
)) {
206 pci_read_config_dword(mvi
->pdev
, offs
, &tmp
);
207 tmp
|= 1U << (PCTL_PHY_DSBL_OFFS
+ phy_id
);
208 pci_write_config_dword(mvi
->pdev
, offs
, tmp
);
210 tmp
= mr32(MVS_PHY_CTL
);
211 tmp
|= 1U << (PCTL_PHY_DSBL_OFFS
+ phy_id
);
212 mw32(MVS_PHY_CTL
, tmp
);
216 static void mvs_64xx_phy_enable(struct mvs_info
*mvi
, u32 phy_id
)
218 void __iomem
*regs
= mvi
->regs
;
220 if (!(mvi
->flags
& MVF_FLAG_SOC
)) {
228 pci_read_config_dword(mvi
->pdev
, offs
, &tmp
);
229 tmp
&= ~(1U << (PCTL_PHY_DSBL_OFFS
+ phy_id
));
230 pci_write_config_dword(mvi
->pdev
, offs
, tmp
);
232 tmp
= mr32(MVS_PHY_CTL
);
233 tmp
&= ~(1U << (PCTL_PHY_DSBL_OFFS
+ phy_id
));
234 mw32(MVS_PHY_CTL
, tmp
);
238 static int mvs_64xx_init(struct mvs_info
*mvi
)
240 void __iomem
*regs
= mvi
->regs
;
244 if (mvi
->pdev
&& mvi
->pdev
->revision
== 0)
245 mvi
->flags
|= MVF_PHY_PWR_FIX
;
246 if (!(mvi
->flags
& MVF_FLAG_SOC
)) {
247 mvs_show_pcie_usage(mvi
);
248 tmp
= mvs_64xx_chip_reset(mvi
);
252 tmp
= mr32(MVS_PHY_CTL
);
253 tmp
&= ~PCTL_PWR_OFF
;
254 tmp
|= PCTL_PHY_DSBL
;
255 mw32(MVS_PHY_CTL
, tmp
);
259 /* make sure RST is set; HBA_RST /should/ have done that for us */
260 cctl
= mr32(MVS_CTL
) & 0xFFFF;
264 mw32_f(MVS_CTL
, cctl
| CCTL_RST
);
266 if (!(mvi
->flags
& MVF_FLAG_SOC
)) {
267 /* write to device control _AND_ device status register */
268 pci_read_config_dword(mvi
->pdev
, PCR_DEV_CTRL
, &tmp
);
269 tmp
&= ~PRD_REQ_MASK
;
271 pci_write_config_dword(mvi
->pdev
, PCR_DEV_CTRL
, tmp
);
273 pci_read_config_dword(mvi
->pdev
, PCR_PHY_CTL
, &tmp
);
274 tmp
&= ~PCTL_PWR_OFF
;
275 tmp
&= ~PCTL_PHY_DSBL
;
276 pci_write_config_dword(mvi
->pdev
, PCR_PHY_CTL
, tmp
);
278 pci_read_config_dword(mvi
->pdev
, PCR_PHY_CTL2
, &tmp
);
280 tmp
&= ~PCTL_PHY_DSBL
;
281 pci_write_config_dword(mvi
->pdev
, PCR_PHY_CTL2
, tmp
);
283 tmp
= mr32(MVS_PHY_CTL
);
284 tmp
&= ~PCTL_PWR_OFF
;
286 tmp
&= ~PCTL_PHY_DSBL
;
287 tmp
|= PCTL_LINK_RST
;
288 mw32(MVS_PHY_CTL
, tmp
);
290 tmp
&= ~PCTL_LINK_RST
;
291 mw32(MVS_PHY_CTL
, tmp
);
296 mw32(MVS_PCS
, 0); /* MVS_PCS */
298 mvs_64xx_phy_hacks(mvi
);
300 tmp
= mvs_cr32(mvi
, CMD_PHY_MODE_21
);
303 mvs_cw32(mvi
, CMD_PHY_MODE_21
, tmp
);
305 /* enable auto port detection */
306 mw32(MVS_GBL_PORT_TYPE
, MODE_AUTO_DET_EN
);
308 mw32(MVS_CMD_LIST_LO
, mvi
->slot_dma
);
309 mw32(MVS_CMD_LIST_HI
, (mvi
->slot_dma
>> 16) >> 16);
311 mw32(MVS_RX_FIS_LO
, mvi
->rx_fis_dma
);
312 mw32(MVS_RX_FIS_HI
, (mvi
->rx_fis_dma
>> 16) >> 16);
314 mw32(MVS_TX_CFG
, MVS_CHIP_SLOT_SZ
);
315 mw32(MVS_TX_LO
, mvi
->tx_dma
);
316 mw32(MVS_TX_HI
, (mvi
->tx_dma
>> 16) >> 16);
318 mw32(MVS_RX_CFG
, MVS_RX_RING_SZ
);
319 mw32(MVS_RX_LO
, mvi
->rx_dma
);
320 mw32(MVS_RX_HI
, (mvi
->rx_dma
>> 16) >> 16);
322 for (i
= 0; i
< mvi
->chip
->n_phy
; i
++) {
323 /* set phy local SAS address */
324 /* should set little endian SAS address to 64xx chip */
325 mvs_set_sas_addr(mvi
, i
, PHYR_ADDR_LO
, PHYR_ADDR_HI
,
326 cpu_to_be64(mvi
->phy
[i
].dev_sas_addr
));
328 mvs_64xx_enable_xmt(mvi
, i
);
330 mvs_64xx_phy_reset(mvi
, i
, MVS_HARD_RESET
);
332 mvs_64xx_detect_porttype(mvi
, i
);
334 if (mvi
->flags
& MVF_FLAG_SOC
) {
335 /* set select registers */
336 writel(0x0E008000, regs
+ 0x000);
337 writel(0x59000008, regs
+ 0x004);
338 writel(0x20, regs
+ 0x008);
339 writel(0x20, regs
+ 0x00c);
340 writel(0x20, regs
+ 0x010);
341 writel(0x20, regs
+ 0x014);
342 writel(0x20, regs
+ 0x018);
343 writel(0x20, regs
+ 0x01c);
345 for (i
= 0; i
< mvi
->chip
->n_phy
; i
++) {
346 /* clear phy int status */
347 tmp
= mvs_read_port_irq_stat(mvi
, i
);
348 tmp
&= ~PHYEV_SIG_FIS
;
349 mvs_write_port_irq_stat(mvi
, i
, tmp
);
351 /* set phy int mask */
352 tmp
= PHYEV_RDY_CH
| PHYEV_BROAD_CH
| PHYEV_UNASSOC_FIS
|
353 PHYEV_ID_DONE
| PHYEV_DCDR_ERR
| PHYEV_CRC_ERR
|
355 mvs_write_port_irq_mask(mvi
, i
, tmp
);
358 mvs_update_phyinfo(mvi
, i
, 1);
361 /* little endian for open address and command table, etc. */
362 cctl
= mr32(MVS_CTL
);
363 cctl
|= CCTL_ENDIAN_CMD
;
364 cctl
|= CCTL_ENDIAN_DATA
;
365 cctl
&= ~CCTL_ENDIAN_OPEN
;
366 cctl
|= CCTL_ENDIAN_RSP
;
367 mw32_f(MVS_CTL
, cctl
);
369 /* reset CMD queue */
372 tmp
&= ~PCS_SELF_CLEAR
;
375 * the max count is 0x1ff, while our max slot is 0x200,
376 * it will make count 0.
379 if (MVS_CHIP_SLOT_SZ
> 0x1ff)
380 mw32(MVS_INT_COAL
, 0x1ff | COAL_EN
);
382 mw32(MVS_INT_COAL
, MVS_CHIP_SLOT_SZ
| COAL_EN
);
384 tmp
= 0x10000 | interrupt_coalescing
;
385 mw32(MVS_INT_COAL_TMOUT
, tmp
);
387 /* ladies and gentlemen, start your engines */
389 mw32(MVS_TX_CFG
, MVS_CHIP_SLOT_SZ
| TX_EN
);
390 mw32(MVS_RX_CFG
, MVS_RX_RING_SZ
| RX_EN
);
391 /* enable CMD/CMPL_Q/RESP mode */
392 mw32(MVS_PCS
, PCS_SATA_RETRY
| PCS_FIS_RX_EN
|
393 PCS_CMD_EN
| PCS_CMD_STOP_ERR
);
395 /* enable completion queue interrupt */
396 tmp
= (CINT_PORT_MASK
| CINT_DONE
| CINT_MEM
| CINT_SRS
| CINT_CI_STOP
|
399 mw32(MVS_INT_MASK
, tmp
);
401 /* Enable SRS interrupt */
402 mw32(MVS_INT_MASK_SRS_0
, 0xFFFF);
/*
 * Map BARs 4 and 2 (main registers / expansion registers) via the
 * common mvs_ioremap() helper.  Returns 0 on success, -1 on failure.
 */
static int mvs_64xx_ioremap(struct mvs_info *mvi)
{
	if (!mvs_ioremap(mvi, 4, 2))
		return 0;
	return -1;
}
414 static void mvs_64xx_iounmap(struct mvs_info
*mvi
)
416 mvs_iounmap(mvi
->regs
);
417 mvs_iounmap(mvi
->regs_ex
);
420 static void mvs_64xx_interrupt_enable(struct mvs_info
*mvi
)
422 void __iomem
*regs
= mvi
->regs
;
425 tmp
= mr32(MVS_GBL_CTL
);
426 mw32(MVS_GBL_CTL
, tmp
| INT_EN
);
429 static void mvs_64xx_interrupt_disable(struct mvs_info
*mvi
)
431 void __iomem
*regs
= mvi
->regs
;
434 tmp
= mr32(MVS_GBL_CTL
);
435 mw32(MVS_GBL_CTL
, tmp
& ~INT_EN
);
438 static u32
mvs_64xx_isr_status(struct mvs_info
*mvi
, int irq
)
440 void __iomem
*regs
= mvi
->regs
;
443 if (!(mvi
->flags
& MVF_FLAG_SOC
)) {
444 stat
= mr32(MVS_GBL_INT_STAT
);
446 if (stat
== 0 || stat
== 0xffffffff)
453 static irqreturn_t
mvs_64xx_isr(struct mvs_info
*mvi
, int irq
, u32 stat
)
455 void __iomem
*regs
= mvi
->regs
;
457 /* clear CMD_CMPLT ASAP */
458 mw32_f(MVS_INT_STAT
, CINT_DONE
);
460 spin_lock(&mvi
->lock
);
462 spin_unlock(&mvi
->lock
);
467 static void mvs_64xx_command_active(struct mvs_info
*mvi
, u32 slot_idx
)
470 mvs_cw32(mvi
, 0x40 + (slot_idx
>> 3), 1 << (slot_idx
% 32));
471 mvs_cw32(mvi
, 0x00 + (slot_idx
>> 3), 1 << (slot_idx
% 32));
473 tmp
= mvs_cr32(mvi
, 0x00 + (slot_idx
>> 3));
474 } while (tmp
& 1 << (slot_idx
% 32));
476 tmp
= mvs_cr32(mvi
, 0x40 + (slot_idx
>> 3));
477 } while (tmp
& 1 << (slot_idx
% 32));
480 static void mvs_64xx_issue_stop(struct mvs_info
*mvi
, enum mvs_port_type type
,
483 void __iomem
*regs
= mvi
->regs
;
486 if (type
== PORT_TYPE_SATA
) {
487 tmp
= mr32(MVS_INT_STAT_SRS_0
) | (1U << tfs
);
488 mw32(MVS_INT_STAT_SRS_0
, tmp
);
490 mw32(MVS_INT_STAT
, CINT_CI_STOP
);
491 tmp
= mr32(MVS_PCS
) | 0xFF00;
495 static void mvs_64xx_free_reg_set(struct mvs_info
*mvi
, u8
*tfs
)
497 void __iomem
*regs
= mvi
->regs
;
500 if (*tfs
== MVS_ID_NOT_MAPPED
)
503 offs
= 1U << ((*tfs
& 0x0f) + PCS_EN_SATA_REG_SHIFT
);
506 mw32(MVS_PCS
, tmp
& ~offs
);
509 mw32(MVS_CTL
, tmp
& ~offs
);
512 tmp
= mr32(MVS_INT_STAT_SRS_0
) & (1U << *tfs
);
514 mw32(MVS_INT_STAT_SRS_0
, tmp
);
516 *tfs
= MVS_ID_NOT_MAPPED
;
520 static u8
mvs_64xx_assign_reg_set(struct mvs_info
*mvi
, u8
*tfs
)
524 void __iomem
*regs
= mvi
->regs
;
526 if (*tfs
!= MVS_ID_NOT_MAPPED
)
531 for (i
= 0; i
< mvi
->chip
->srs_sz
; i
++) {
534 offs
= 1U << ((i
& 0x0f) + PCS_EN_SATA_REG_SHIFT
);
539 mw32(MVS_PCS
, tmp
| offs
);
541 mw32(MVS_CTL
, tmp
| offs
);
542 tmp
= mr32(MVS_INT_STAT_SRS_0
) & (1U << i
);
544 mw32(MVS_INT_STAT_SRS_0
, tmp
);
548 return MVS_ID_NOT_MAPPED
;
551 static void mvs_64xx_make_prd(struct scatterlist
*scatter
, int nr
, void *prd
)
554 struct scatterlist
*sg
;
555 struct mvs_prd
*buf_prd
= prd
;
556 for_each_sg(scatter
, sg
, nr
, i
) {
557 buf_prd
->addr
= cpu_to_le64(sg_dma_address(sg
));
558 buf_prd
->len
= cpu_to_le32(sg_dma_len(sg
));
563 static int mvs_64xx_oob_done(struct mvs_info
*mvi
, int i
)
566 mvs_write_port_cfg_addr(mvi
, i
,
568 phy_st
= mvs_read_port_cfg_data(mvi
, i
);
569 if (phy_st
& PHY_OOB_DTCTD
)
574 static void mvs_64xx_fix_phy_info(struct mvs_info
*mvi
, int i
,
575 struct sas_identify_frame
*id
)
578 struct mvs_phy
*phy
= &mvi
->phy
[i
];
579 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
582 (phy
->phy_status
& PHY_NEG_SPP_PHYS_LINK_RATE_MASK
) >>
583 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET
;
585 phy
->minimum_linkrate
=
587 PHY_MIN_SPP_PHYS_LINK_RATE_MASK
) >> 8;
588 phy
->maximum_linkrate
=
590 PHY_MAX_SPP_PHYS_LINK_RATE_MASK
) >> 12;
592 mvs_write_port_cfg_addr(mvi
, i
, PHYR_IDENTIFY
);
593 phy
->dev_info
= mvs_read_port_cfg_data(mvi
, i
);
595 mvs_write_port_cfg_addr(mvi
, i
, PHYR_ATT_DEV_INFO
);
596 phy
->att_dev_info
= mvs_read_port_cfg_data(mvi
, i
);
598 mvs_write_port_cfg_addr(mvi
, i
, PHYR_ATT_ADDR_HI
);
599 phy
->att_dev_sas_addr
=
600 (u64
) mvs_read_port_cfg_data(mvi
, i
) << 32;
601 mvs_write_port_cfg_addr(mvi
, i
, PHYR_ATT_ADDR_LO
);
602 phy
->att_dev_sas_addr
|= mvs_read_port_cfg_data(mvi
, i
);
603 phy
->att_dev_sas_addr
= SAS_ADDR(&phy
->att_dev_sas_addr
);
606 static void mvs_64xx_phy_work_around(struct mvs_info
*mvi
, int i
)
609 struct mvs_phy
*phy
= &mvi
->phy
[i
];
610 mvs_write_port_vsr_addr(mvi
, i
, VSR_PHY_MODE6
);
611 tmp
= mvs_read_port_vsr_data(mvi
, i
);
612 if (((phy
->phy_status
& PHY_NEG_SPP_PHYS_LINK_RATE_MASK
) >>
613 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET
) ==
614 SAS_LINK_RATE_1_5_GBPS
)
615 tmp
&= ~PHY_MODE6_LATECLK
;
617 tmp
|= PHY_MODE6_LATECLK
;
618 mvs_write_port_vsr_data(mvi
, i
, tmp
);
621 static void mvs_64xx_phy_set_link_rate(struct mvs_info
*mvi
, u32 phy_id
,
622 struct sas_phy_linkrates
*rates
)
624 u32 lrmin
= 0, lrmax
= 0;
627 tmp
= mvs_read_phy_ctl(mvi
, phy_id
);
628 lrmin
= (rates
->minimum_linkrate
<< 8);
629 lrmax
= (rates
->maximum_linkrate
<< 12);
639 mvs_write_phy_ctl(mvi
, phy_id
, tmp
);
640 mvs_64xx_phy_reset(mvi
, phy_id
, MVS_HARD_RESET
);
643 static void mvs_64xx_clear_active_cmds(struct mvs_info
*mvi
)
646 void __iomem
*regs
= mvi
->regs
;
648 mw32(MVS_PCS
, tmp
& 0xFFFF);
651 mw32(MVS_CTL
, tmp
& 0xFFFF);
656 static u32
mvs_64xx_spi_read_data(struct mvs_info
*mvi
)
658 void __iomem
*regs
= mvi
->regs_ex
;
659 return ior32(SPI_DATA_REG_64XX
);
662 static void mvs_64xx_spi_write_data(struct mvs_info
*mvi
, u32 data
)
664 void __iomem
*regs
= mvi
->regs_ex
;
666 iow32(SPI_DATA_REG_64XX
, data
);
670 static int mvs_64xx_spi_buildcmd(struct mvs_info
*mvi
,
680 dwTmp
= ((u32
)cmd
<< 24) | ((u32
)length
<< 19);
684 if (addr
!= MV_MAX_U32
) {
686 dwTmp
|= (addr
& 0x0003FFFF);
694 static int mvs_64xx_spi_issuecmd(struct mvs_info
*mvi
, u32 cmd
)
696 void __iomem
*regs
= mvi
->regs_ex
;
699 for (retry
= 0; retry
< 1; retry
++) {
700 iow32(SPI_CTRL_REG_64XX
, SPI_CTRL_VENDOR_ENABLE
);
701 iow32(SPI_CMD_REG_64XX
, cmd
);
702 iow32(SPI_CTRL_REG_64XX
,
703 SPI_CTRL_VENDOR_ENABLE
| SPI_CTRL_SPISTART
);
709 static int mvs_64xx_spi_waitdataready(struct mvs_info
*mvi
, u32 timeout
)
711 void __iomem
*regs
= mvi
->regs_ex
;
714 for (i
= 0; i
< timeout
; i
++) {
715 dwTmp
= ior32(SPI_CTRL_REG_64XX
);
716 if (!(dwTmp
& SPI_CTRL_SPISTART
))
724 static void mvs_64xx_fix_dma(struct mvs_info
*mvi
, u32 phy_mask
,
725 int buf_len
, int from
, void *prd
)
728 struct mvs_prd
*buf_prd
= prd
;
729 dma_addr_t buf_dma
= mvi
->bulk_buffer_dma
;
732 for (i
= 0; i
< MAX_SG_ENTRY
- from
; i
++) {
733 buf_prd
->addr
= cpu_to_le64(buf_dma
);
734 buf_prd
->len
= cpu_to_le32(buf_len
);
739 static void mvs_64xx_tune_interrupt(struct mvs_info
*mvi
, u32 time
)
741 void __iomem
*regs
= mvi
->regs
;
744 * the max count is 0x1ff, while our max slot is 0x200,
745 * it will make count 0.
748 mw32(MVS_INT_COAL
, 0);
749 mw32(MVS_INT_COAL_TMOUT
, 0x10000);
751 if (MVS_CHIP_SLOT_SZ
> 0x1ff)
752 mw32(MVS_INT_COAL
, 0x1ff|COAL_EN
);
754 mw32(MVS_INT_COAL
, MVS_CHIP_SLOT_SZ
|COAL_EN
);
756 tmp
= 0x10000 | time
;
757 mw32(MVS_INT_COAL_TMOUT
, tmp
);
761 const struct mvs_dispatch mvs_64xx_dispatch
= {
769 mvs_64xx_interrupt_enable
,
770 mvs_64xx_interrupt_disable
,
773 mvs_read_port_cfg_data
,
774 mvs_write_port_cfg_data
,
775 mvs_write_port_cfg_addr
,
776 mvs_read_port_vsr_data
,
777 mvs_write_port_vsr_data
,
778 mvs_write_port_vsr_addr
,
779 mvs_read_port_irq_stat
,
780 mvs_write_port_irq_stat
,
781 mvs_read_port_irq_mask
,
782 mvs_write_port_irq_mask
,
783 mvs_64xx_command_active
,
784 mvs_64xx_clear_srs_irq
,
789 mvs_64xx_assign_reg_set
,
790 mvs_64xx_free_reg_set
,
794 mvs_64xx_detect_porttype
,
796 mvs_64xx_fix_phy_info
,
797 mvs_64xx_phy_work_around
,
798 mvs_64xx_phy_set_link_rate
,
799 mvs_hw_max_link_rate
,
800 mvs_64xx_phy_disable
,
804 mvs_64xx_clear_active_cmds
,
805 mvs_64xx_spi_read_data
,
806 mvs_64xx_spi_write_data
,
807 mvs_64xx_spi_buildcmd
,
808 mvs_64xx_spi_issuecmd
,
809 mvs_64xx_spi_waitdataready
,
811 mvs_64xx_tune_interrupt
,