/*
 * Marvell 88SE94xx hardware specific
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */
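
/*
 * Companion headers: the helpers used below (mr32/mw32, the mvs_* port
 * accessors, struct mvs_info and the VSR/CMD register names) come from the
 * mvsas core.  The exact header names are an assumption based on the usual
 * mvsas driver layout.
 */
#include "mv_sas.h"
#include "mv_94xx.h"
#include "mv_chips.h"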
static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
{
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];
	u32 phy_status;

	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
	reg = mvs_read_port_vsr_data(mvi, i);
	phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	switch (phy_status) {
	case 0x10:
		phy->phy_type |= PORT_TYPE_SAS;
		break;
	case 0x1d:
	default:
		phy->phy_type |= PORT_TYPE_SATA;
		break;
	}
}
void set_phy_tuning(struct mvs_info *mvi, int phy_id,
			struct phy_tuning phy_tuning)
{
	u32 tmp, setting_0 = 0, setting_1 = 0;
	u8 i;

	/* Remap information for B0 chip:
	 *
	 * R0Ch -> R118h[15:0]  (Adapted DFE F3 - F5 coefficient)
	 * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
	 * R0Eh -> R11Ch[15:0]  (Generation 1 Setting 1)
	 * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
	 * R10h -> R120h[15:0]  (Generation 2 Setting 1)
	 * R11h -> R120h[31:16] (Generation 3 Setting 0)
	 * R12h -> R124h[15:0]  (Generation 3 Setting 1)
	 * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
	 */

	/* A0 has a different set of registers */
	if (mvi->pdev->revision == VANIR_A0_REV)
		return;

	for (i = 0; i < 3; i++) {
		/* loop 3 times, set Gen 1, Gen 2, Gen 3 */
		switch (i) {
		case 0:
			setting_0 = GENERATION_1_SETTING;
			setting_1 = GENERATION_1_2_SETTING;
			break;
		case 1:
			setting_0 = GENERATION_1_2_SETTING;
			setting_1 = GENERATION_2_3_SETTING;
			break;
		case 2:
			setting_0 = GENERATION_2_3_SETTING;
			setting_1 = GENERATION_3_4_SETTING;
			break;
		}

		/* Set:
		 *
		 * Transmitter Emphasis Enable
		 * Transmitter Emphasis Amplitude
		 * Transmitter Amplitude
		 */
		mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
		tmp = mvs_read_port_vsr_data(mvi, phy_id);
		tmp &= ~(0xFBE << 16);
		tmp |= (((phy_tuning.trans_emp_en << 11) |
			(phy_tuning.trans_emp_amp << 7) |
			(phy_tuning.trans_amp << 1)) << 16);
		mvs_write_port_vsr_data(mvi, phy_id, tmp);

		/* Set Transmitter Amplitude Adjust */
		mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
		tmp = mvs_read_port_vsr_data(mvi, phy_id);
		tmp &= ~0xC000;
		tmp |= (phy_tuning.trans_amp_adj << 14);
		mvs_write_port_vsr_data(mvi, phy_id, tmp);
	}
}
void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
				struct ffe_control ffe)
{
	u32 tmp;

	/* Don't run this if A0/B0 */
	if ((mvi->pdev->revision == VANIR_A0_REV)
		|| (mvi->pdev->revision == VANIR_B0_REV))
		return;

	/* FFE Resistor and Capacitor */
	/* R10Ch DFE Resolution Control/Squelch and FFE Setting
	 *
	 * FFE_FORCE            [7]
	 * FFE_RES_SEL          [6:4]
	 * FFE_CAP_SEL          [3:0]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0xFF;

	/* Read from HBA_Info_Page */
	tmp |= ((0x1 << 7) |
		(ffe.ffe_rss_sel << 4) |
		(ffe.ffe_cap_sel << 0));

	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R064h PHY Mode Register 1
	 *
	 * DFE_DIS		[1]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	/* No defines in HBA_Info_Page */
	tmp |= (0x0 << 1);
	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R110h DFE F0-F1 Coefficient Control/DFE Update Control
	 *
	 * DFE_UPDATE_EN        [11:6]
	 * DFE_FX_FORCE         [5:0]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	/* No defines in HBA_Info_Page */
	tmp |= ((0x3F << 6) | (0x0 << 0));
	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
	 *
	 * FFE_TRAIN_EN         [3]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	/* No defines in HBA_Info_Page */
	tmp |= (0x0 << 3);
	mvs_write_port_vsr_data(mvi, phy_id, tmp);
}
/*Notice: this function must be called when phy is disabled*/
void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
{
	union reg_phy_cfg phy_cfg, phy_cfg_tmp;

	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
	phy_cfg.v = 0;
	phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
	phy_cfg.u.sas_support = 1;
	phy_cfg.u.sata_support = 1;
	phy_cfg.u.sata_host_mode = 1;

	switch (rate) {
	case 0x0:
		/* support 1.5 Gbps */
		phy_cfg.u.speed_support = 1;
		phy_cfg.u.snw_3_support = 0;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
		break;
	case 0x1:
		/* support 1.5, 3.0 Gbps */
		phy_cfg.u.speed_support = 3;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
		break;
	case 0x2:
	default:
		/* support 1.5, 3.0, 6.0 Gbps */
		phy_cfg.u.speed_support = 7;
		phy_cfg.u.snw_3_support = 1;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
		break;
	}
	mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
}
static void mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
{
	u32 temp;

	temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
	if (temp == 0xFFFFFFFFL) {
		mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
	}

	temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
	if (temp == 0xFFL) {
		switch (mvi->pdev->revision) {
		case VANIR_A0_REV:
		case VANIR_B0_REV:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
			break;
		default:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
			break;
		}
	}

	temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
	if (temp == 0xFFL)
		/*set default phy_rate = 6Gbps*/
		mvi->hba_info_param.phy_rate[phy_id] = 0x2;

	set_phy_tuning(mvi, phy_id,
		mvi->hba_info_param.phy_tuning[phy_id]);
	set_phy_ffe_tuning(mvi, phy_id,
		mvi->hba_info_param.ffe_ctl[phy_id]);
	set_phy_rate(mvi, phy_id,
		mvi->hba_info_param.phy_rate[phy_id]);
}
static void mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_PCS);
	tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
	mw32(MVS_PCS, tmp);
}
static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;
	u32 delay = 5000;

	if (hard == MVS_PHY_TUNE) {
		mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
		tmp = mvs_read_port_cfg_data(mvi, phy_id);
		mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
		mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
		return;
	}

	tmp = mvs_read_port_irq_stat(mvi, phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, phy_id, tmp);
	if (hard) {
		tmp = mvs_read_phy_ctl(mvi, phy_id);
		tmp |= PHY_RST_HARD;
		mvs_write_phy_ctl(mvi, phy_id, tmp);
		do {
			tmp = mvs_read_phy_ctl(mvi, phy_id);
			udelay(10);
			delay--;
		} while ((tmp & PHY_RST_HARD) && delay);
		if (!delay)
			mv_dprintk("phy hard reset failed.\n");
	} else {
		tmp = mvs_read_phy_ctl(mvi, phy_id);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, phy_id, tmp);
	}
}
static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
	u32 tmp;

	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
}
static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
	u32 tmp;
	u8 revision = 0;

	revision = mvi->pdev->revision;
	if (revision == VANIR_A0_REV) {
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
	}
	if (revision == VANIR_B0_REV) {
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
		mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
	}

	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp |= bit(0);
	mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
}
static int mvs_94xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;
	u8 revision;

	revision = mvi->pdev->revision;
	mvs_show_pcie_usage(mvi);
	if (mvi->flags & MVF_FLAG_SOC) {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (mvi->flags & MVF_FLAG_SOC) {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* disable Multiplexing, enable phy implemented */
	mw32(MVS_PORTS_IMP, 0xFF);

	if (revision == VANIR_A0_REV) {
		mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
		mw32(MVS_PA_VSR_PORT, 0x00018080);
	}
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
	if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
		/* set 6G/3G/1.5G, multiplexing, without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
	else
		/* set 6G/3G/1.5G, multiplexing, with and without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084fffe);

	if (revision == VANIR_B0_REV) {
		mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
		mw32(MVS_PA_VSR_PORT, 0x08001006);
		mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
		mw32(MVS_PA_VSR_PORT, 0x0000705f);
	}

	mw32(MVS_PCS, 0);		/* MVS_PCS */
	mw32(MVS_STP_REG_SET_0, 0);
	mw32(MVS_STP_REG_SET_1, 0);

	/* disable non data frame retry */
	tmp = mvs_cr32(mvi, CMD_SAS_CTL1);
	if ((revision == VANIR_A0_REV) ||
		(revision == VANIR_B0_REV) ||
		(revision == VANIR_C0_REV)) {
		tmp &= ~0xffff;
		tmp |= 0x007f;
		mvs_cw32(mvi, CMD_SAS_CTL1, tmp);
	}

	/* set LED blink when IO*/
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
	tmp = mr32(MVS_PA_VSR_PORT);
	tmp &= 0xFFFF00FF;
	tmp |= 0x00003300;
	mw32(MVS_PA_VSR_PORT, tmp);
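
	/*
	 * The descriptor rings programmed below are 64-bit DMA addresses
	 * split across LO/HI register pairs.  The high half is written as
	 * (addr >> 16) >> 16 rather than addr >> 32, presumably so the
	 * shift stays well defined (and yields 0) when dma_addr_t is only
	 * 32 bits wide on the build platform.
	 */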
	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvs_94xx_phy_disable(mvi, i);
		/* set phy local SAS address */
		mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
				cpu_to_le64(mvi->phy[i].dev_sas_addr));

		mvs_94xx_enable_xmt(mvi, i);
		mvs_94xx_config_reg_from_hba(mvi, i);
		mvs_94xx_phy_enable(mvi, i);

		mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
		msleep(500);
		mvs_94xx_detect_porttype(mvi, i);
	}

	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}

	/* little endian for open address and command table, etc. */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);
	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	tmp &= ~PCS_SELF_CLEAR;
	mw32(MVS_PCS, tmp);
	/*
	 * the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
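
	/*
	 * Worked example of the clamp above: the coalescing count field only
	 * holds values up to 0x1ff, and this chip's slot count is 0x200, so
	 * 0x200 & 0x1ff == 0 would program a count of zero and effectively
	 * disable coalescing; clamping to 0x1ff keeps it active.
	 */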
	/* default interrupt coalescing time is 128us */
	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
	tmp |= CINT_PHY_MASK;
	mw32(MVS_INT_MASK, tmp);

	tmp = mvs_cr32(mvi, CMD_LINK_TIMER);
	tmp |= 0xFFFF0000;
	mvs_cw32(mvi, CMD_LINK_TIMER, tmp);

	/* tune STP performance */
	tmp = 0x003F003F;
	mvs_cw32(mvi, CMD_PL_TIMER, tmp);

	/* This can improve expander large block size seq write performance */
	tmp = mvs_cr32(mvi, CMD_PORT_LAYER_TIMER1);
	tmp |= 0x00000005;
	mvs_cw32(mvi, CMD_PORT_LAYER_TIMER1, tmp);

	/* change the connection open-close behavior (bit 9)
	 * set bit8 to 1 for performance tuning */
	tmp = mvs_cr32(mvi, CMD_SL_MODE0);
	tmp |= 0x00000300;
	/* set bit0 to 0 to enable retry for no_dest reject case */
	tmp &= 0xFFFFFFFE;
	mvs_cw32(mvi, CMD_SL_MODE0, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}
static int mvs_94xx_ioremap(struct mvs_info *mvi)
{
	if (!mvs_ioremap(mvi, 2, -1)) {
		mvi->regs_ex = mvi->regs + 0x10200;
		mvi->regs += 0x20000;
		return 0;
	}
	return -1;
}
static void mvs_94xx_iounmap(struct mvs_info *mvi)
{
	if (mvi->regs) {
		mvi->regs -= 0x20000;
		mvs_iounmap(mvi->regs);
	}
}
static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	tmp |= (IRQ_SAS_A | IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(tmp, regs + 0x0C);
	writel(tmp, regs + 0x10);
	writel(tmp, regs + 0x14);
	writel(tmp, regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}
static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);

	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(tmp, regs + 0x0C);
	writel(tmp, regs + 0x10);
	writel(tmp, regs + 0x14);
	writel(tmp, regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}
static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
{
	void __iomem *regs = mvi->regs_ex;
	u32 stat = 0;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		stat = mr32(MVS_GBL_INT_STAT);

		if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
			return 0;
	}
	return stat;
}
static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
	void __iomem *regs = mvi->regs;

	if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
			((stat & IRQ_SAS_B) && mvi->id == 1)) {
		mw32_f(MVS_INT_STAT, CINT_DONE);

		spin_lock(&mvi->lock);
		mvs_int_full(mvi);
		spin_unlock(&mvi->lock);
	}
	return IRQ_HANDLED;
}
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;

	tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3));
	if (tmp & 1 << (slot_idx % 32)) {
		mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
		mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
			1 << (slot_idx % 32));
		do {
			tmp = mvs_cr32(mvi,
				MVS_COMMAND_ACTIVE + (slot_idx >> 3));
		} while (tmp & 1 << (slot_idx % 32));
	}
}
void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (clear_all) {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp) {
			mv_dprintk("check SRS 0 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_0, tmp);
		}
		tmp = mr32(MVS_INT_STAT_SRS_1);
		if (tmp) {
			mv_dprintk("check SRS 1 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_1, tmp);
		}
	} else {
		if (reg_set > 31)
			tmp = mr32(MVS_INT_STAT_SRS_1);
		else
			tmp = mr32(MVS_INT_STAT_SRS_0);

		if (tmp & (1 << (reg_set % 32))) {
			mv_dprintk("register set 0x%x was stopped.\n", reg_set);
			if (reg_set > 31)
				mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
			else
				mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
		}
	}
}
static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
				u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	mvs_94xx_clear_srs_irq(mvi, 0, 1);

	tmp = mr32(MVS_INT_STAT);
	mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}
static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 err_0, err_1;
	u8 i;
	struct mvs_device *device;

	err_0 = mr32(MVS_NON_NCQ_ERR_0);
	err_1 = mr32(MVS_NON_NCQ_ERR_1);

	mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
			err_0, err_1);
	for (i = 0; i < 32; i++) {
		if (err_0 & bit(i)) {
			device = mvs_find_dev_by_reg_set(mvi, i);
			if (device)
				mvs_release_task(mvi, device->sas_device);
		}
		if (err_1 & bit(i)) {
			device = mvs_find_dev_by_reg_set(mvi, i + 32);
			if (device)
				mvs_release_task(mvi, device->sas_device);
		}
	}

	mw32(MVS_NON_NCQ_ERR_0, err_0);
	mw32(MVS_NON_NCQ_ERR_1, err_1);
}
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	void __iomem *regs = mvi->regs;
	u8 reg_set = *tfs;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	mvi->sata_reg_set &= ~bit(reg_set);
	if (reg_set < 32)
		w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
	else
		w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));

	*tfs = MVS_ID_NOT_MAPPED;
}
static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	int i;
	void __iomem *regs = mvi->regs;

	if (*tfs != MVS_ID_NOT_MAPPED)
		return 0;

	i = mv_ffc64(mvi->sata_reg_set);
	if (i >= 32) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
		*tfs = i;
		return 0;
	} else if (i >= 0) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)mvi->sata_reg_set);
		*tfs = i;
		return 0;
	}
	return MVS_ID_NOT_MAPPED;
}
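
/*
 * mvs_94xx_make_prd() below fills the hardware PRD (physical region
 * descriptor) table for a scatterlist.  struct mvs_prd_imt packs one entry's
 * transfer length and misc control bits into a single 32-bit word, which is
 * why the entry is assembled in im_len and then copied out through a u32
 * cast before the endian conversion.
 */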
static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
	int i;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd = prd;
	struct mvs_prd_imt im_len;

	*(u32 *)&im_len = 0;
	for_each_sg(scatter, sg, nr, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		im_len.len = sg_dma_len(sg);
		buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
		buf_prd++;
	}
}
static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
{
	u32 phy_st;

	phy_st = mvs_read_phy_ctl(mvi, i);
	if (phy_st & PHY_READY_MASK)
		return 1;
	return 0;
}
static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port_id,
					CONFIG_ID_FRAME0 + i * 4);
		id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
	}
	memcpy(id, id_frame, 28);
}
static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port_id,
					CONFIG_ATT_ID_FRAME0 + i * 4);
		id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
		mv_dprintk("94xx phy %d atta frame %d %x.\n",
			port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
	}
	memcpy(id, id_frame, 28);
}
static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
{
	u32 att_dev_info = 0;

	att_dev_info |= id->dev_type;
	if (id->stp_iport)
		att_dev_info |= PORT_DEV_STP_INIT;
	if (id->smp_iport)
		att_dev_info |= PORT_DEV_SMP_INIT;
	if (id->ssp_iport)
		att_dev_info |= PORT_DEV_SSP_INIT;
	if (id->stp_tport)
		att_dev_info |= PORT_DEV_STP_TRGT;
	if (id->smp_tport)
		att_dev_info |= PORT_DEV_SMP_TRGT;
	if (id->ssp_tport)
		att_dev_info |= PORT_DEV_SSP_TRGT;

	att_dev_info |= (u32)id->phy_id << 24;
	return att_dev_info;
}
static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
{
	return mvs_94xx_make_dev_info(id);
}
static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
			struct sas_identify_frame *id)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
	sas_phy->linkrate =
		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
	sas_phy->linkrate += 0x8;
	mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
	mvs_94xx_get_dev_identify_frame(mvi, i, id);
	phy->dev_info = mvs_94xx_make_dev_info(id);

	if (phy->phy_type & PORT_TYPE_SAS) {
		mvs_94xx_get_att_identify_frame(mvi, i, id);
		phy->att_dev_info = mvs_94xx_make_att_info(id);
		phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
	} else {
		phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
	}

	/* enable spin up bit */
	mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
	mvs_write_port_cfg_data(mvi, i, 0x04);
}
void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
			struct sas_phy_linkrates *rates)
{
	u32 lrmax = 0;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);
	lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;

	if (lrmax) {
		tmp &= ~(0x3 << 12);
		tmp |= lrmax;
	}
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
}
static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;

	tmp = mr32(MVS_STP_REG_SET_0);
	mw32(MVS_STP_REG_SET_0, 0);
	mw32(MVS_STP_REG_SET_0, tmp);
	tmp = mr32(MVS_STP_REG_SET_1);
	mw32(MVS_STP_REG_SET_1, 0);
	mw32(MVS_STP_REG_SET_1, tmp);
}
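
/*
 * SPI flash helpers.  mvs_94xx_ioremap() above sets regs_ex to the mapped
 * BAR plus 0x10200, so subtracting 0x10200 in the helpers below addresses
 * the SPI registers relative to the start of that same mapping.
 */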
u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;

	return mr32(SPI_RD_DATA_REG_94XX);
}
void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;

	mw32(SPI_RD_DATA_REG_94XX, data);
}
int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
			u32 *dwCmd,
			u8 cmd,
			u8 read,
			u8 length,
			u32 addr)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	u32 dwTmp;

	dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
	if (read)
		dwTmp |= SPI_CTRL_READ_94XX;

	if (addr != MV_MAX_U32) {
		mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
		dwTmp |= SPI_ADDR_VLD_94XX;
	}

	*dwCmd = dwTmp;
	return 0;
}
int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;

	mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
	return 0;
}
int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	u32 i, dwTmp;

	for (i = 0; i < timeout; i++) {
		dwTmp = mr32(SPI_CTRL_REG_94XX);
		if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
			return 0;
		msleep(10);
	}

	return -1;
}
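
/*
 * mvs_94xx_fix_dma() below pads the tail of a command's PRD table: entries
 * from 'from' up to MAX_SG_ENTRY are pointed at a driver bulk buffer, and
 * the final entry is marked PRD_CHAINED_ENTRY and pointed back at the
 * previous descriptor.  The VANIR A0/B0 revision check suggests this is a
 * workaround for those silicon revisions; that reading is inferred from the
 * code rather than from a documented erratum.
 */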
void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
				int buf_len, int from, void *prd)
{
	int i;
	struct mvs_prd *buf_prd = prd;
	dma_addr_t buf_dma;
	struct mvs_prd_imt im_len;

	*(u32 *)&im_len = 0;
	buf_prd += from;

#define PRD_CHAINED_ENTRY 0x01
	if ((mvi->pdev->revision == VANIR_A0_REV) ||
		(mvi->pdev->revision == VANIR_B0_REV))
		buf_dma = (phy_mask <= 0x08) ?
				mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
	else
		return;

	for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
		if (i == MAX_SG_ENTRY - 1) {
			buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
			im_len.len = 2;
			im_len.misc_ctl = PRD_CHAINED_ENTRY;
		} else {
			buf_prd->addr = cpu_to_le64(buf_dma);
			im_len.len = buf_len;
		}
		buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
	}
}
static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
{
	void __iomem *regs = mvi->regs;
	u32 tmp = 0;

	/*
	 * the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	if (time == 0) {
		mw32(MVS_INT_COAL, 0);
		mw32(MVS_INT_COAL_TMOUT, 0x10000);
	} else {
		if (MVS_CHIP_SLOT_SZ > 0x1ff)
			mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
		else
			mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

		tmp = 0x10000 | time;
		mw32(MVS_INT_COAL_TMOUT, tmp);
	}
}
const struct mvs_dispatch mvs_94xx_dispatch = {
	mvs_94xx_isr_status,
	mvs_94xx_interrupt_enable,
	mvs_94xx_interrupt_disable,
	mvs_read_port_cfg_data,
	mvs_write_port_cfg_data,
	mvs_write_port_cfg_addr,
	mvs_read_port_vsr_data,
	mvs_write_port_vsr_data,
	mvs_write_port_vsr_addr,
	mvs_read_port_irq_stat,
	mvs_write_port_irq_stat,
	mvs_read_port_irq_mask,
	mvs_write_port_irq_mask,
	mvs_94xx_command_active,
	mvs_94xx_clear_srs_irq,
	mvs_94xx_issue_stop,
	mvs_94xx_assign_reg_set,
	mvs_94xx_free_reg_set,
	mvs_94xx_detect_porttype,
	mvs_94xx_fix_phy_info,
	mvs_94xx_phy_set_link_rate,
	mvs_hw_max_link_rate,
	mvs_94xx_phy_disable,
	mvs_94xx_phy_enable,
	mvs_94xx_clear_active_cmds,
	mvs_94xx_spi_read_data,
	mvs_94xx_spi_write_data,
	mvs_94xx_spi_buildcmd,
	mvs_94xx_spi_issuecmd,
	mvs_94xx_spi_waitdataready,
	mvs_94xx_tune_interrupt,
	mvs_94xx_non_spec_ncq_error,
};