/*
 * Marvell 88SE94xx hardware specific
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"
#include "mv_94xx.h"
#include "mv_chips.h"
static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
{
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];
	u32 phy_status;

	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
	reg = mvs_read_port_vsr_data(mvi, i);
	phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	switch (phy_status) {
	case 0x10:
		phy->phy_type |= PORT_TYPE_SAS;
		break;
	case 0x1d:
	default:
		phy->phy_type |= PORT_TYPE_SATA;
		break;
	}
}
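/*
 * set_phy_tuning: program the per-generation (Gen1/Gen2/Gen3) transmitter
 * emphasis and amplitude settings taken from the HBA_Info_Page.  A0 silicon
 * is skipped because it keeps these values in a different register set.
 */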
void set_phy_tuning(struct mvs_info *mvi, int phy_id,
			struct phy_tuning phy_tuning)
{
	u32 tmp, setting_0 = 0, setting_1 = 0;
	u8 i;

	/* Remap information for B0 chip:
	*
	* R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
	* R0Dh -> R118h[31:16] (Generation 1 Setting 0)
	* R0Eh -> R11Ch[15:0]  (Generation 1 Setting 1)
	* R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
	* R10h -> R120h[15:0]  (Generation 2 Setting 1)
	* R11h -> R120h[31:16] (Generation 3 Setting 0)
	* R12h -> R124h[15:0]  (Generation 3 Setting 1)
	* R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
	*/

	/* A0 has a different set of registers */
	if (mvi->pdev->revision == VANIR_A0_REV)
		return;

	for (i = 0; i < 3; i++) {
		/* loop 3 times, set Gen 1, Gen 2, Gen 3 */
		switch (i) {
		case 0:
			setting_0 = GENERATION_1_SETTING;
			setting_1 = GENERATION_1_2_SETTING;
			break;
		case 1:
			setting_0 = GENERATION_1_2_SETTING;
			setting_1 = GENERATION_2_3_SETTING;
			break;
		case 2:
			setting_0 = GENERATION_2_3_SETTING;
			setting_1 = GENERATION_3_4_SETTING;
			break;
		}

		/* Set:
		*
		* Transmitter Emphasis Enable
		* Transmitter Emphasis Amplitude
		* Transmitter Amplitude
		*/
		mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
		tmp = mvs_read_port_vsr_data(mvi, phy_id);
		tmp &= ~(0xFBE << 16);
		tmp |= (((phy_tuning.trans_emp_en << 11) |
			(phy_tuning.trans_emp_amp << 7) |
			(phy_tuning.trans_amp << 1)) << 16);
		mvs_write_port_vsr_data(mvi, phy_id, tmp);

		/* Set Transmitter Amplitude Adjust */
		mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
		tmp = mvs_read_port_vsr_data(mvi, phy_id);
		tmp &= ~(0xC000);
		tmp |= (phy_tuning.trans_amp_adj << 14);
		mvs_write_port_vsr_data(mvi, phy_id, tmp);
	}
}
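/*
 * set_phy_ffe_tuning: apply the FFE (feed-forward equalizer) resistor and
 * capacitor selects from the HBA_Info_Page plus hard-coded DFE defaults.
 * Not applicable to A0/B0 silicon.
 */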
void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
				struct ffe_control ffe)
{
	u32 tmp;

	/* Don't run this if A0/B0 */
	if ((mvi->pdev->revision == VANIR_A0_REV)
		|| (mvi->pdev->revision == VANIR_B0_REV))
		return;

	/* FFE Resistor and Capacitor */
	/* R10Ch DFE Resolution Control/Squelch and FFE Setting
	 *
	 * FFE_FORCE            [7]
	 * FFE_RES_SEL          [6:4]
	 * FFE_CAP_SEL          [3:0]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0xFF;

	/* Read from HBA_Info_Page */
	tmp |= ((0x1 << 7) |
		(ffe.ffe_rss_sel << 4) |
		(ffe.ffe_cap_sel << 0));

	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R064h PHY Mode Register 1
	 *
	 * DFE_DIS		18
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0x40001;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= (0 << 18);
	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R110h DFE F0-F1 Coefficient Control/DFE Update Control
	 *
	 * DFE_UPDATE_EN        [11:6]
	 * DFE_FX_FORCE         [5:0]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0xFFF;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= ((0x3F << 6) | (0x0 << 0));
	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
	 *
	 * FFE_TRAIN_EN         3
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0x8;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= (0 << 3);
	mvs_write_port_vsr_data(mvi, phy_id, tmp);
}
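/*
 * set_phy_rate: rate 0x0 limits the phy to 1.5 Gbps, 0x1 to 1.5/3.0 Gbps,
 * and 0x2 (the default) allows 1.5/3.0/6.0 Gbps.  The notice below applies:
 * VSR_PHY_MODE2 may only be rewritten while the phy is disabled.
 */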
/*Notice: this function must be called when phy is disabled*/
void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
{
	union reg_phy_cfg phy_cfg, phy_cfg_tmp;
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
	phy_cfg.v = 0;
	phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
	phy_cfg.u.sas_support = 1;
	phy_cfg.u.sata_support = 1;
	phy_cfg.u.sata_host_mode = 1;

	switch (rate) {
	case 0x0:
		/* support 1.5 Gbps */
		phy_cfg.u.speed_support = 1;
		phy_cfg.u.snw_3_support = 0;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
		break;
	case 0x1:
		/* support 1.5, 3.0 Gbps */
		phy_cfg.u.speed_support = 3;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
		break;
	case 0x2:
	default:
		/* support 1.5, 3.0, 6.0 Gbps */
		phy_cfg.u.speed_support = 7;
		phy_cfg.u.snw_3_support = 1;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
		break;
	}
	mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
}
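/*
 * mvs_94xx_config_reg_from_hba: pull per-phy tuning, FFE and rate settings
 * from the HBA_Info_Page and apply them.  A value of all 0xFF (presumably an
 * unprogrammed flash page) is replaced with conservative defaults.
 */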
static void __devinit
mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
{
	u32 temp;
	temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
	if (temp == 0xFFFFFFFFL) {
		mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
	}

	temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
	if (temp == 0xFFL) {
		switch (mvi->pdev->revision) {
		case VANIR_A0_REV:
		case VANIR_B0_REV:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
			break;
		case VANIR_C0_REV:
		case VANIR_C1_REV:
		case VANIR_C2_REV:
		default:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
			break;
		}
	}

	temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
	if (temp == 0xFFL)
		/*set default phy_rate = 6Gbps*/
		mvi->hba_info_param.phy_rate[phy_id] = 0x2;

	set_phy_tuning(mvi, phy_id,
		mvi->hba_info_param.phy_tuning[phy_id]);
	set_phy_ffe_tuning(mvi, phy_id,
		mvi->hba_info_param.ffe_ctl[phy_id]);
	set_phy_rate(mvi, phy_id,
		mvi->hba_info_param.phy_rate[phy_id]);
}
static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_PCS);
	tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
	mw32(MVS_PCS, tmp);
}
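/*
 * mvs_94xx_phy_reset: MVS_PHY_TUNE only pokes PHYR_SATA_CTL; otherwise the
 * ready-change event is cleared first and either a hard reset (PHY_RST_HARD,
 * polled until the bit self-clears) or a plain reset is issued.
 */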
static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;
	u32 delay = 5000;
	if (hard == MVS_PHY_TUNE) {
		mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
		tmp = mvs_read_port_cfg_data(mvi, phy_id);
		mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
		mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
		return;
	}

	tmp = mvs_read_port_irq_stat(mvi, phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, phy_id, tmp);
	if (hard) {
		tmp = mvs_read_phy_ctl(mvi, phy_id);
		tmp |= PHY_RST_HARD;
		mvs_write_phy_ctl(mvi, phy_id, tmp);
		do {
			tmp = mvs_read_phy_ctl(mvi, phy_id);
			udelay(10);
			delay--;
		} while ((tmp & PHY_RST_HARD) && delay);
		if (!delay)
			mv_dprintk("phy hard reset failed.\n");
	} else {
		tmp = mvs_read_phy_ctl(mvi, phy_id);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, phy_id, tmp);
	}
}
static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
	u32 tmp;
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
}
static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
	u32 tmp;
	u8 revision = 0;

	revision = mvi->pdev->revision;
	if (revision == VANIR_A0_REV) {
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
	}
	if (revision == VANIR_B0_REV) {
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
		mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
	}

	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
}
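/*
 * mvs_94xx_init: chip bring-up.  Resets the controller, programs the PA VSR
 * registers for the silicon revision, loads the command/FIS/TX/RX ring base
 * addresses, configures every phy from the HBA_Info_Page, then unmasks the
 * completion-queue and phy interrupts and starts the delivery engines.
 */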
static int __devinit mvs_94xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;
	u8 revision;

	revision = mvi->pdev->revision;
	mvs_show_pcie_usage(mvi);
	if (mvi->flags & MVF_FLAG_SOC) {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (mvi->flags & MVF_FLAG_SOC) {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* disable Multiplexing, enable phy implemented */
	mw32(MVS_PORTS_IMP, 0xFF);

	if (revision == VANIR_A0_REV) {
		mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
		mw32(MVS_PA_VSR_PORT, 0x00018080);
	}
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
	if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
		/* set 6G/3G/1.5G, multiplexing, without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
	else
		/* set 6G/3G/1.5G, multiplexing, with and without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084fffe);

	if (revision == VANIR_B0_REV) {
		mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
		mw32(MVS_PA_VSR_PORT, 0x08001006);
		mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
		mw32(MVS_PA_VSR_PORT, 0x0000705f);
	}

	/* reset control */
	mw32(MVS_PCS, 0);		/* MVS_PCS */
	mw32(MVS_STP_REG_SET_0, 0);
	mw32(MVS_STP_REG_SET_1, 0);

	/* init phys */
	mvs_phy_hacks(mvi);

	/* disable non data frame retry */
	tmp = mvs_cr32(mvi, CMD_SAS_CTL1);
	if ((revision == VANIR_A0_REV) ||
		(revision == VANIR_B0_REV) ||
		(revision == VANIR_C0_REV)) {
		tmp &= ~0xffff;
		tmp |= 0x007f;
		mvs_cw32(mvi, CMD_SAS_CTL1, tmp);
	}

	/* set LED blink when IO*/
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
	tmp = mr32(MVS_PA_VSR_PORT);
	tmp &= 0xFFFF00FF;
	tmp |= 0x00003300;
	mw32(MVS_PA_VSR_PORT, tmp);

	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvs_94xx_phy_disable(mvi, i);
		/* set phy local SAS address */
		mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
				cpu_to_le64(mvi->phy[i].dev_sas_addr));

		mvs_94xx_enable_xmt(mvi, i);
		mvs_94xx_config_reg_from_hba(mvi, i);
		mvs_94xx_phy_enable(mvi, i);

		mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
		msleep(500);
		mvs_94xx_detect_porttype(mvi, i);
	}

	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}

	/* little endian for open address and command table, etc. */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	tmp &= ~PCS_SELF_CLEAR;
	mw32(MVS_PCS, tmp);
	/*
	 * the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

	/* default interrupt coalescing time is 128us */
	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
	tmp |= CINT_PHY_MASK;
	mw32(MVS_INT_MASK, tmp);

	tmp = mvs_cr32(mvi, CMD_LINK_TIMER);
	tmp |= 0xFFFF0000;
	mvs_cw32(mvi, CMD_LINK_TIMER, tmp);

	/* tune STP performance */
	tmp = 0x003F003F;
	mvs_cw32(mvi, CMD_PL_TIMER, tmp);

	/* This can improve expander large block size seq write performance */
	tmp = mvs_cr32(mvi, CMD_PORT_LAYER_TIMER1);
	tmp |= 0xFFFF007F;
	mvs_cw32(mvi, CMD_PORT_LAYER_TIMER1, tmp);

	/* change the connection open-close behavior (bit 9)
	 * set bit8 to 1 for performance tuning */
	tmp = mvs_cr32(mvi, CMD_SL_MODE0);
	tmp |= 0x00000300;
	/* set bit0 to 0 to enable retry for no_dest reject case */
	tmp &= 0xFFFFFFFE;
	mvs_cw32(mvi, CMD_SL_MODE0, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}
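/*
 * BAR layout used below: the extended (global/SPI) registers sit at offset
 * 0x10200 of the mapped BAR and the per-host register window starts at
 * 0x20000; the SPI helpers later subtract 0x10200 from regs_ex to get back
 * to the BAR base.
 */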
static int mvs_94xx_ioremap(struct mvs_info *mvi)
{
	if (!mvs_ioremap(mvi, 2, -1)) {
		mvi->regs_ex = mvi->regs + 0x10200;
		mvi->regs += 0x20000;
		if (mvi->id == 1)
			mvi->regs += 0x4000;
		return 0;
	}
	return -1;
}
static void mvs_94xx_iounmap(struct mvs_info *mvi)
{
	if (mvi->regs) {
		mvi->regs -= 0x20000;
		if (mvi->id == 1)
			mvi->regs -= 0x4000;
		mvs_iounmap(mvi->regs);
	}
}
static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	tmp |= (IRQ_SAS_A | IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(tmp, regs + 0x0C);
	writel(tmp, regs + 0x10);
	writel(tmp, regs + 0x14);
	writel(tmp, regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}
static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);

	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(tmp, regs + 0x0C);
	writel(tmp, regs + 0x10);
	writel(tmp, regs + 0x14);
	writel(tmp, regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}
static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
{
	void __iomem *regs = mvi->regs_ex;
	u32 stat = 0;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		stat = mr32(MVS_GBL_INT_STAT);

		if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
			return 0;
	}
	return stat;
}
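/*
 * The 88SE94xx exposes two SAS cores; IRQ_SAS_A belongs to host 0 and
 * IRQ_SAS_B to host 1, so each mvs_info only services its own status bit.
 */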
static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
	void __iomem *regs = mvi->regs;

	if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
			((stat & IRQ_SAS_B) && mvi->id == 1)) {
		mw32_f(MVS_INT_STAT, CINT_DONE);

		spin_lock(&mvi->lock);
		mvs_int_full(mvi);
		spin_unlock(&mvi->lock);
	}
	return IRQ_HANDLED;
}
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;
	tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
	if (tmp & 1 << (slot_idx % 32)) {
		mv_printk("command active %08X,  slot [%x].\n", tmp, slot_idx);
		mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
			1 << (slot_idx % 32));
		do {
			tmp = mvs_cr32(mvi,
				MVS_COMMAND_ACTIVE + (slot_idx >> 3));
		} while (tmp & 1 << (slot_idx % 32));
	}
}
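/*
 * SATA register set (SRS) interrupt handling: sets 0-31 are reported in
 * MVS_INT_STAT_SRS_0 and sets 32-63 in MVS_INT_STAT_SRS_1.  clear_all
 * acknowledges everything, otherwise only the bit for reg_set is cleared.
 */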
void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (clear_all) {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp) {
			mv_dprintk("check SRS 0 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_0, tmp);
		}
		tmp = mr32(MVS_INT_STAT_SRS_1);
		if (tmp) {
			mv_dprintk("check SRS 1 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_1, tmp);
		}
	} else {
		if (reg_set > 31)
			tmp = mr32(MVS_INT_STAT_SRS_1);
		else
			tmp = mr32(MVS_INT_STAT_SRS_0);

		if (tmp & (1 << (reg_set % 32))) {
			mv_dprintk("register set 0x%x was stopped.\n", reg_set);
			if (reg_set > 31)
				mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
			else
				mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
		}
	}
}
static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
				u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	mvs_94xx_clear_srs_irq(mvi, 0, 1);

	tmp = mr32(MVS_INT_STAT);
	mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}
*mvi
)
683 void __iomem
*regs
= mvi
->regs
;
686 struct mvs_device
*device
;
688 err_0
= mr32(MVS_NON_NCQ_ERR_0
);
689 err_1
= mr32(MVS_NON_NCQ_ERR_1
);
691 mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
693 for (i
= 0; i
< 32; i
++) {
694 if (err_0
& bit(i
)) {
695 device
= mvs_find_dev_by_reg_set(mvi
, i
);
697 mvs_release_task(mvi
, device
->sas_device
);
699 if (err_1
& bit(i
)) {
700 device
= mvs_find_dev_by_reg_set(mvi
, i
+32);
702 mvs_release_task(mvi
, device
->sas_device
);
706 mw32(MVS_NON_NCQ_ERR_0
, err_0
);
707 mw32(MVS_NON_NCQ_ERR_1
, err_1
);
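/*
 * mvi->sata_reg_set is a 64-bit bitmap of allocated SATA register sets; the
 * low and high halves are mirrored to hardware through w_reg_set_enable().
 */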
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	void __iomem *regs = mvi->regs;
	u8 reg_set = *tfs;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	mvi->sata_reg_set &= ~bit(reg_set);
	if (reg_set < 32)
		w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
	else
		w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));

	*tfs = MVS_ID_NOT_MAPPED;

	return;
}
static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	int i;
	void __iomem *regs = mvi->regs;

	if (*tfs != MVS_ID_NOT_MAPPED)
		return 0;

	i = mv_ffc64(mvi->sata_reg_set);
	if (i >= 32) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
		*tfs = i;
		return 0;
	} else if (i >= 0) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)mvi->sata_reg_set);
		*tfs = i;
		return 0;
	}

	return MVS_ID_NOT_MAPPED;
}
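/*
 * Each 94xx PRD entry is a 64-bit DMA address followed by a packed
 * interface/misc/length dword (struct mvs_prd_imt), written little endian.
 */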
static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
	int i;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd = prd;
	struct mvs_prd_imt im_len;
	*(u32 *)&im_len = 0;
	for_each_sg(scatter, sg, nr, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		im_len.len = sg_dma_len(sg);
		buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
		buf_prd++;
	}
}
static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
{
	u32 phy_st;
	phy_st = mvs_read_phy_ctl(mvi, i);
	if (phy_st & PHY_READY_MASK)
		return 1;
	return 0;
}
static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port_id,
					CONFIG_ID_FRAME0 + i * 4);
		id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
	}
	memcpy(id, id_frame, 28);
}
static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port_id,
					CONFIG_ATT_ID_FRAME0 + i * 4);
		id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
		mv_dprintk("94xx phy %d atta frame %d %x.\n",
			port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
	}
	memcpy(id, id_frame, 28);
}
static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
{
	u32 att_dev_info = 0;

	att_dev_info |= id->dev_type;
	if (id->stp_iport)
		att_dev_info |= PORT_DEV_STP_INIT;
	if (id->smp_iport)
		att_dev_info |= PORT_DEV_SMP_INIT;
	if (id->ssp_iport)
		att_dev_info |= PORT_DEV_SSP_INIT;
	if (id->stp_tport)
		att_dev_info |= PORT_DEV_STP_TRGT;
	if (id->smp_tport)
		att_dev_info |= PORT_DEV_SMP_TRGT;
	if (id->ssp_tport)
		att_dev_info |= PORT_DEV_SSP_TRGT;

	att_dev_info |= (u32)id->phy_id<<24;
	return att_dev_info;
}
static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
{
	return mvs_94xx_make_dev_info(id);
}
static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
			struct sas_identify_frame *id)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
	sas_phy->linkrate =
		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
	sas_phy->linkrate += 0x8;
	mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
	mvs_94xx_get_dev_identify_frame(mvi, i, id);
	phy->dev_info = mvs_94xx_make_dev_info(id);

	if (phy->phy_type & PORT_TYPE_SAS) {
		mvs_94xx_get_att_identify_frame(mvi, i, id);
		phy->att_dev_info = mvs_94xx_make_att_info(id);
		phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
	} else {
		phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
	}

	/* enable spin up bit */
	mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
	mvs_write_port_cfg_data(mvi, i, 0x04);
}
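/*
 * The maximum negotiable rate is programmed starting at bit 12 of the phy
 * control register, encoded relative to 1.5 Gbps; a hard reset makes the
 * new limit take effect.
 */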
void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
			struct sas_phy_linkrates *rates)
{
	u32 lrmax = 0;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);
	lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;

	if (lrmax) {
		tmp &= ~(0x3 << 12);
		tmp |= lrmax;
	}
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
}
static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;
	tmp = mr32(MVS_STP_REG_SET_0);
	mw32(MVS_STP_REG_SET_0, 0);
	mw32(MVS_STP_REG_SET_0, tmp);
	tmp = mr32(MVS_STP_REG_SET_1);
	mw32(MVS_STP_REG_SET_1, 0);
	mw32(MVS_STP_REG_SET_1, tmp);
}
u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	return mr32(SPI_RD_DATA_REG_94XX);
}

void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	mw32(SPI_RD_DATA_REG_94XX, data);
}

int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
				u32      *dwCmd,
				u8       cmd,
				u8       read,
				u8       length,
				u32      addr
				)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	u32  dwTmp;

	dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
	if (read)
		dwTmp |= SPI_CTRL_READ_94XX;

	if (addr != MV_MAX_U32) {
		mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
		dwTmp |= SPI_ADDR_VLD_94XX;
	}

	*dwCmd = dwTmp;
	return 0;
}

int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);

	return 0;
}

int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	u32 i, dwTmp;

	for (i = 0; i < timeout; i++) {
		dwTmp = mr32(SPI_CTRL_REG_94XX);
		if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
			return 0;
		msleep(10);
	}

	return -1;
}
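/*
 * mvs_94xx_fix_dma: on A0/B0 silicon the unused tail of the PRD table is
 * pointed at a per-host bulk bounce buffer and the last entry is chained
 * back (PRD_CHAINED_ENTRY), apparently so the DMA engine never runs off the
 * end of the table.
 */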
void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
			int buf_len, int from, void *prd)
{
	int i;
	struct mvs_prd *buf_prd = prd;
	dma_addr_t buf_dma;
	struct mvs_prd_imt im_len;

	*(u32 *)&im_len = 0;
	buf_prd += from;

#define PRD_CHAINED_ENTRY 0x01
	if ((mvi->pdev->revision == VANIR_A0_REV) ||
			(mvi->pdev->revision == VANIR_B0_REV))
		buf_dma = (phy_mask <= 0x08) ?
				mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
	else
		return;

	for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
		if (i == MAX_SG_ENTRY - 1) {
			buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
			im_len.len = 2;
			im_len.misc_ctl = PRD_CHAINED_ENTRY;
		} else {
			buf_prd->addr = cpu_to_le64(buf_dma);
			im_len.len = buf_len;
		}
		buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
	}
}
static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
{
	void __iomem *regs = mvi->regs;
	u32 tmp = 0;
	/*
	 * the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	if (time == 0) {
		mw32(MVS_INT_COAL, 0);
		mw32(MVS_INT_COAL_TMOUT, 0x10000);
	} else {
		if (MVS_CHIP_SLOT_SZ > 0x1ff)
			mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
		else
			mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);

		tmp = 0x10000 | time;
		mw32(MVS_INT_COAL_TMOUT, tmp);
	}
}
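/*
 * mvs_94xx_dispatch wires the 94xx-specific routines above into the common
 * mvsas core; the initializer is positional and must match the field order
 * of struct mvs_dispatch in mv_sas.h.
 */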
const struct mvs_dispatch mvs_94xx_dispatch = {
	"mv94xx",
	mvs_94xx_init,
	NULL,
	mvs_94xx_ioremap,
	mvs_94xx_iounmap,
	mvs_94xx_isr,
	mvs_94xx_isr_status,
	mvs_94xx_interrupt_enable,
	mvs_94xx_interrupt_disable,
	mvs_read_phy_ctl,
	mvs_write_phy_ctl,
	mvs_read_port_cfg_data,
	mvs_write_port_cfg_data,
	mvs_write_port_cfg_addr,
	mvs_read_port_vsr_data,
	mvs_write_port_vsr_data,
	mvs_write_port_vsr_addr,
	mvs_read_port_irq_stat,
	mvs_write_port_irq_stat,
	mvs_read_port_irq_mask,
	mvs_write_port_irq_mask,
	mvs_94xx_command_active,
	mvs_94xx_clear_srs_irq,
	mvs_94xx_issue_stop,
	mvs_start_delivery,
	mvs_rx_update,
	mvs_int_full,
	mvs_94xx_assign_reg_set,
	mvs_94xx_free_reg_set,
	mvs_get_prd_size,
	mvs_get_prd_count,
	mvs_94xx_make_prd,
	mvs_94xx_detect_porttype,
	mvs_94xx_oob_done,
	mvs_94xx_fix_phy_info,
	NULL,
	mvs_94xx_phy_set_link_rate,
	mvs_hw_max_link_rate,
	mvs_94xx_phy_disable,
	mvs_94xx_phy_enable,
	mvs_94xx_phy_reset,
	NULL,
	mvs_94xx_clear_active_cmds,
	mvs_94xx_spi_read_data,
	mvs_94xx_spi_write_data,
	mvs_94xx_spi_buildcmd,
	mvs_94xx_spi_issuecmd,
	mvs_94xx_spi_waitdataready,
	mvs_94xx_fix_dma,
	mvs_94xx_tune_interrupt,
	mvs_94xx_non_spec_ncq_error,
};